prompt
stringlengths
19
879k
completion
stringlengths
3
53.8k
api
stringlengths
8
59
#!/apps/anaconda3/bin/python3 import warnings warnings.filterwarnings("ignore") import numpy as np import sys import re import time import json import multiprocessing import collections import serial import socket import py3toolbox as tb import pyqtgraph as pg from random import randint from multiprocessing import Process, Pipe from pyqtgraph.Qt import QtGui, QtCore from collections import deque , defaultdict def get_config(): config = { '_DATA_SOURCE_' : ['SERIAL', 'FILE', 'NETWORK'], 'DATA_SOURCE' : 'FILE', 'NETWORK_HOST' : '192.168.1.142', 'NETWORK_PORT' : 80, 'SERIAL_PORT' : 'COM3', 'SERIAL_RATE' : 115200, 'SERIAL_TIMEOUT' : 1, 'LOG_SOURCE_DATA' : False, 'DATA_FEED_WAIT' : True, 'DROP_FRAME' : True, 'REFRESH_RATE' : 20, 'DATA_FILE' : './sample.data', 'LOG_FILE' : '/tmp/1.log', 'FILE_TEST' : True, 'DEBUG_MODE' : False, 'ANTIALIAS' : True, 'PEN_WIDTH' : 0, 'WIN_SIZE_X' : 1400, 'WIN_SIZE_Y' : 800, 'WIN_TITLE' : 'Realtime Data Visualizer', 'CUSTOM_CONFIG' : False, 'layouts' : { 'win_layout' : (5,3), 'boards' : { '1' : { 'layout' : (1,1,1,1), 'max_entries' : 100 }, '2' : { 'layout' : (1,2,1,1), 'max_entries' : 100 }, '3' : { 'layout' : (1,3,1,1), 'max_entries' : 100 }, '4' : { 'layout' : (2,1,1,1), 'max_entries' : 100 }, '5' : { 'layout' : (2,2,1,1), 'max_entries' : 100 }, '6' : { 'layout' : (2,3,1,1), 'max_entries' : 100 }, '7' : { 'layout' : (3,1,1,1), 'max_entries' : 100 }, '8' : { 'layout' : (3,2,1,1), 'max_entries' : 100 }, '9' : { 'layout' : (3,3,1,1), 'max_entries' : 100 }, '10' : { 'layout' : (4,1,1,1), 'max_entries' : 100 }, '11' : { 'layout' : (4,2,1,1), 'max_entries' : 100 }, '12' : { 'layout' : (4,3,1,1), 'max_entries' : 100 }, '13' : { 'layout' : (5,1,1,3), 'max_entries' : 400 } } }, 'data_config' : { 'ax' : { 'board_id' : '1', 'color' : 'b' }, 'ay' : { 'board_id' : '2', 'color' : 'g' }, 'az' : { 'board_id' : '3', 'color' : 'r' }, 'gx' : { 'board_id' : '4', 'color' : 'c' }, 'gy' : { 'board_id' : '5', 'color' : 'm' }, 'gz' : { 'board_id' : '6', 'color' : 
'y' }, 'ax_raw' : { 'board_id' : '1', 'color' : (60,60,60) }, 'ay_raw' : { 'board_id' : '2', 'color' : (60,60,60) }, 'az_raw' : { 'board_id' : '3', 'color' : (60,60,60) }, 'gx_raw' : { 'board_id' : '4', 'color' : (60,60,60) }, 'gy_raw' : { 'board_id' : '5', 'color' : (60,60,60) }, 'gz_raw' : { 'board_id' : '6', 'color' : (60,60,60) }, 'Pitch' : { 'board_id' : '7', 'color' : 'r' }, 'Yaw' : { 'board_id' : '8', 'color' : 'g' }, 'Roll' : { 'board_id' : '9', 'color' : 'b' }, 'err_P' : { 'board_id' :'10', 'color' : 'r' }, 'err_I' : { 'board_id' :'11', 'color' : 'g' }, 'err_D' : { 'board_id' :'12', 'color' : 'b' }, 'Error' : { 'board_id' :'13', 'color' : 'w' }, 'PID' : { 'board_id' :'13', 'color' : 'r' }, 'g_int' : { 'board_id' :'13', 'color' : 'g' }, 'g_pitch' : { 'board_id' :'13', 'color' : 'b' } } } return config class DataReader(multiprocessing.Process): def __init__(self, out_q) : multiprocessing.Process.__init__(self) self.config = get_config() self.out_q = out_q self.data_dic = {} self.data_count = 0 self.sample_rate = 0 self.receive_time = time.time() def read_serial_data(self, serial_port, serial_rate, serial_timeout): if self.config['LOG_SOURCE_DATA'] == True and self.config['FILE_TEST'] == False : tb.rm_file(self.config['DATA_FILE']) ser = serial.Serial(port=serial_port , baudrate=serial_rate, timeout=serial_timeout) print("connected to: " + ser.portstr) this_line = "" while True: try : one_byte = ser.read(1) if len(one_byte) < 1 : continue if one_byte == b"\r": #method should returns bytes self.data_count +=1 # print (this_line) parsed_data = self.parse_csv_data(this_line) if parsed_data is not None: self.push_out_q(parsed_data) if self.config['LOG_SOURCE_DATA'] == True: tb.write_file(file_name=self.config['DATA_FILE'], text=this_line + "\n", mode="a") this_line = "" else: this_line += one_byte.decode('ascii') except Exception as err: print (err) time.sleep(1) pass def read_network_data(self, host, port) : client = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM) client.connect((host, port)) while True: this_line = client.recv(4096).decode("utf-8") parsed_data = self.parse_csv_data(this_line) if parsed_data is not None: self.push_out_q(parsed_data) if self.config['LOG_SOURCE_DATA'] == True: tb.write_file(file_name=self.config['DATA_FILE'], text=this_line + "\n", mode="a") pass def read_file_data(self, data_file): data_fh = open(data_file, "r") for data_line in data_fh.read().splitlines(True): if (len(data_line) >= 1) : self.push_out_q(self.parse_csv_data(data_line)) def parse_csv_data(self, data_str): data_json = None for rec in data_str.split(','): if len(rec) < 1 : continue m = re.match("\s*(\w+)\s*\=\s*([^\,]+)\s*", rec) if m: self.data_dic[m.group(1)] = float(m.group(2).rstrip()) data_json = json.dumps(self.data_dic) else: return None if self.config['DEBUG_MODE'] == 'Y' : tb.write_log(self.config['LOG_FILE'],data_json) return data_json def push_out_q(self, data_message): #self.sample_rate = int(1 / (time.time() - self.receive_time)) #print ('sample rate = ', self.sample_rate, data_message) #self.receive_time = time.time() self.out_q.put(data_message) #self.out_q.send(data_message) def run(self): #self.create_task_sync_files() if self.config['DATA_SOURCE'] == 'FILE': self.read_file_data(self.config['DATA_FILE']) elif self.config['DATA_SOURCE'] == 'SERIAL' : self.read_serial_data(self.config['SERIAL_PORT'],self.config['SERIAL_RATE'], int(self.config['SERIAL_TIMEOUT'])) elif self.config['DATA_SOURCE'] == 'NETWORK': self.read_network_data(self.config['NETWORK_HOST'],self.config['NETWORK_PORT']) class Visualizer(multiprocessing.Process): def __init__(self,in_q=None): multiprocessing.Process.__init__(self) self.config = get_config() self.in_q = in_q self.trace_data = {} # data stuff self.fps = 0 # PyQtGRaph stuff self.app = QtGui.QApplication([]) self.win = pg.GraphicsWindow(title="Basic plotting") self.win.addLayout(row=self.config['layouts']['win_layout'][0], col=self.config['layouts']['win_layout'][1]) 
self.win.resize(self.config['WIN_SIZE_X'],self.config['WIN_SIZE_Y']) self.win.setWindowTitle(self.config['WIN_TITLE']) pg.setConfigOptions(antialias=self.config['ANTIALIAS']) self.init_plots() # for FPS calculation self.last = time.time() def init_plots(self): self.boards = {} for b in self.config['layouts']['boards'].keys(): cfg = self.config['layouts']['boards'][b]['layout'] t_row = cfg[0] t_col = cfg[1] t_rowspan = cfg[2] t_colspan = cfg[3] title = None for d in self.config['data_config'].keys(): if self.config['data_config'][d]['board_id'] == b : if title is None : title=d else: title += ',' + d self.boards[b] = self.win.addPlot(row=t_row, col=t_col, rowspan=t_rowspan, colspan=t_colspan, title=title) def init_trace_data (self, key): max_entries = self.config['layouts']['boards'][self.config['data_config'][key]['board_id']]['max_entries'] self.trace_data[key] = {} self.trace_data[key]['color'] = self.config['data_config'][key]['color'] self.trace_data[key]['x_data'] =
np.arange(0,max_entries,1)
numpy.arange
import numpy as np ## requires pytest import sys sys.path.append('../') from line_scan import line_scan equal = np.testing.assert_array_equal ''' I line can go from.... lr - left to right rl = right to left ud - up to down du - down to up mg - slope greater than 1 ml - slope less than one me - slope equal to one Or it can be a completely... h, horizontal v, vertical The following tests account for all possible combinations of orientation and slope. ''' ############## lr_ud def test_lr_ud_mg(): a, b = [np.array([1, 10]), np.array([4, 2])] expected_answer = [[1, 10], [1, 9], [1, 8], [2, 7], [2, 6], [2, 5], [3, 4], [3, 3], [4, 2]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_lr_ud_ml(): a, b = np.array([1, 10]), np.array([10, 5]) expected_answer = [[1, 10], [2, 9], [3, 8], [4, 8], [5, 7], [6, 7], [7, 6], [8, 6], [9, 5], [10, 5]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_lr_ud_me(): a, b = np.array([1, 10]), np.array([10, 1]) expected_answer = [[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5], [7, 4], [8, 3], [9, 2], [10, 1]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) ############## lr_du def test_lr_du_mg(): a, b = [np.array([1, 2]), np.array([4, 10])] expected_answer = [[1, 2], [1, 3], [1, 4], [2, 5], [2, 6], [2, 7], [3, 8], [3, 9], [4, 10]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_lr_du_ml(): a, b = [np.array([1, 5]), np.array([10, 10])] expected_answer = [[1, 5], [2, 5], [3, 6], [4, 6], [5, 7], [6, 7], [7, 8], [8, 8], [9, 9], [10, 10]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_lr_du_me(): a, b = [np.array([1, 1]), np.array([10, 10])] expected_answer = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) ############## rl_ud def test_rl_ud_mg(): a, b = [np.array([4, 10]), np.array([1, 2])] expected_answer = [[4, 10], [3, 9], [3, 8], [2, 7], [2, 6], [2, 5], 
[1, 4]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_rl_ud_ml(): a, b = [np.array([8, 10]), np.array([1, 5])] expected_answer = [[8, 10], [7, 9], [6, 8], [5, 7], [4, 7], [3, 6]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_rl_ud_me(): a, b = [np.array([8, 10]), np.array([4, 5])] expected_answer = [[8, 10], [7, 9], [6, 8], [5, 7]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) ############## rl_du def test_rl_du_mg(): a, b = [np.array([4, 2]), np.array([1, 10])] expected_answer = [[4, 2], [3, 3], [3, 4], [2, 5], [2, 6], [2, 7], [1, 8]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_rl_du_ml(): #-0.444444444444 a, b = [np.array([10, 2]), np.array([1, 6])] expected_answer = [[10, 2], [9, 2], [8, 2], [7, 3], [6, 3], [5, 4], [4, 4], [3, 5]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_rl_du_me(): a, b = [np.array([11, 5]), np.array([1, 15])] expected_answer = [[11, 5], [10, 6], [9, 7], [8, 8], [7, 9], [6, 10], [5, 11], [4, 12], [3, 13]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) ############### h def test_rl_h(): a, b = [np.array([10, 4]), np.array([2, 4])] expected_answer = [[10, 4], [9, 4], [8, 4], [7, 4], [6, 4], [5, 4], [4, 4]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_lr_h(): a, b = [np.array([2, 4]), np.array([10, 4])] expected_answer = [[2, 4], [3, 4], [4, 4], [5, 4], [6, 4], [7, 4], [8, 4], [9, 4], [10, 4]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) ############### v def test_v_du(): a, b = [np.array([10, 2]), np.array([10, 8])] expected_answer = [[10, 2], [10, 3], [10, 4], [10, 5], [10, 6], [10, 7], [10, 8]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) def test_v_ud(): a, b = [np.array([10, 10]), np.array([10, 2])] expected_answer = [[10, 10], [10, 9], [10, 8], [10, 7], [10, 6], [10, 5], [10, 4]] equal(np.array(line_scan(a, b)), np.array(expected_answer)) 
################################################################################ points = {} points['lrudmg'] = [np.array([1,10]), np.array([4,2])] points['lrudml'] = [np.array([1,10]), np.array([10,5])] points['lrudme'] = [np.array([1,10]), np.array([10,1])] points['lrdumg'] = [np.array([1,2]), np.array([4,10])] points['lrduml'] = [np.array([1,5]), np.array([10,10])] points['lrdume'] = [np.array([1,1]), np.array([10,10])] points['rludmg'] = [ np.array([4,10]), np.array([1,2])] points['rludml'] = [ np.array([8,10]), np.array([1,5])] points['rludme'] = [np.array([8,10]), np.array([4,5])] points['rldumg'] = [ np.array([4,2]), np.array([1,10])] points['rlduml'] = [
np.array([10,2])
numpy.array
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import hashlib from typing import List, Tuple, Any, Dict, Callable import numpy as np from . import utils from . import corefuncs from .. import instrumentation as inst from ..common import tools from ..common.typetools import ArrayLike class ArtificialVariable: # pylint: disable=too-many-instance-attributes,too-many-arguments # TODO: refactor, this is not more used for instrumentation, so using the # Variable framework is not necessary def __init__(self, dimension: int, num_blocks: int, block_dimension: int, translation_factor: float, rotation: bool, hashing: bool, only_index_transform: bool) -> None: self._dimension = dimension self._transforms: List[utils.Transform] = [] self.rotation = rotation self.translation_factor = translation_factor self.num_blocks = num_blocks self.block_dimension = block_dimension self.only_index_transform = only_index_transform self.hashing = hashing self.dimension = self._dimension if not self.hashing else 1 # external dim? def _initialize(self) -> None: """Delayed initialization of the transforms to avoid slowing down the instance creation (makes unit testing much faster). This functions creates the random transform used upon each block (translation + optional rotation). 
""" # use random indices for blocks indices = np.random.choice(self._dimension, self.block_dimension * self.num_blocks, replace=False).tolist() indices.sort() # keep the indices sorted sorted so that blocks do not overlap for transform_inds in tools.grouper(indices, n=self.block_dimension): self._transforms.append(utils.Transform(transform_inds, translation_factor=self.translation_factor, rotation=self.rotation)) def process(self, data: ArrayLike, deterministic: bool = True) -> np.ndarray: # pylint: disable=unused-argument if not self._transforms: self._initialize() if self.hashing: state =
np.random.get_state()
numpy.random.get_state
# This initializes the problem class for SWE import numpy as np import matplotlib.pyplot as plt from matplotlib import animation from mpl_toolkits.mplot3d import Axes3D from parameters import Nx, Ny, Lx, Ly from parameters import rho, grav, dt, dx, dy, ft from parameters import K from parameters import plot_viz, num_steps_per_plot, num_samples, num_train # Common functions for spatial discretizations def state_reconstruction(q,Nx,Ny): # Weno5 pad = 3 qtemp = periodic_bc(q,pad) # Smoothness indicators in x beta_0 = 13.0/12.0*(qtemp[pad-2:pad+Nx-2,:]-2.0*qtemp[pad-1:pad+Nx-1,:]+qtemp[pad:Nx+pad,:])**2 \ + 1.0/4.0*(qtemp[pad-2:pad+Nx-2,:]-4.0*qtemp[pad-1:pad+Nx-1,:]+3.0*qtemp[pad:Nx+pad,:])**2 beta_1 = 13.0/12.0*(qtemp[pad-1:pad+Nx-1,:]-2.0*qtemp[pad:pad+Nx,:]+qtemp[pad+1:Nx+pad+1,:])**2 \ + 1.0/4.0*(qtemp[pad-1:pad+Nx-1,:]-qtemp[pad+1:pad+Nx+1,:])**2 beta_2 = 13.0/12.0*(qtemp[pad:pad+Nx,:]-2.0*qtemp[pad+1:pad+Nx+1,:]+qtemp[pad+2:Nx+pad+2,:])**2 \ + 1.0/4.0*(3.0*qtemp[pad:pad+Nx,:]-4.0*qtemp[pad+1:pad+Nx+1,:]+qtemp[pad+2:Nx+pad+2,:])**2 # nonlinear weights in x alpha_0 = (1.0/10.0)/((beta_0+1.0e-6)**2) alpha_1 = (6.0/10.0)/((beta_1+1.0e-6)**2) alpha_2 = (3.0/10.0)/((beta_2+1.0e-6)**2) # Find nonlinear weights w_0 = (alpha_0/(alpha_0+alpha_1+alpha_2))/6.0 w_1 = (alpha_1/(alpha_0+alpha_1+alpha_2))/6.0 w_2 = (alpha_2/(alpha_0+alpha_1+alpha_2))/6.0 # Find state reconstructions in x - wave to right (at i+1/2) qxright = w_0*(2.0*qtemp[pad-2:pad+Nx-2,:]-7.0*qtemp[pad-1:pad+Nx-1,:]+11.0*qtemp[pad:pad+Nx,:]) \ + w_1*(-qtemp[pad-1:pad+Nx-1,:]+5.0*qtemp[pad:pad+Nx,:]+2.0*qtemp[pad+1:pad+Nx+1,:]) \ + w_2*(2.0*qtemp[pad:pad+Nx,:]+5.0*qtemp[pad+1:pad+Nx+1,:]-qtemp[pad+2:pad+Nx+2,:]) # Find state reconstructions in x - wave to left (at i+1/2) qxleft = w_0*(2.0*qtemp[pad+2:pad+Nx+2,:]-7.0*qtemp[pad+1:pad+Nx+1,:]+11.0*qtemp[pad:pad+Nx,:]) \ + w_1*(-qtemp[pad+1:pad+Nx+1,:]+5.0*qtemp[pad:pad+Nx,:]+2.0*qtemp[pad-1:pad+Nx-1,:]) \ + 
w_2*(2.0*qtemp[pad:pad+Nx,:]+5.0*qtemp[pad-1:pad+Nx-1,:]-qtemp[pad-2:pad+Nx-2,:]) qxleft = qxleft[:,pad:pad+Ny] qxright = qxright[:,pad:pad+Ny] # Smoothness indicators in y beta_0 = 13.0/12.0*(qtemp[:,pad-2:pad+Ny-2]-2.0*qtemp[:,pad-1:pad+Ny-1]+qtemp[:,pad:Ny+pad])**2 \ + 1.0/4.0*(qtemp[:,pad-2:pad+Ny-2]-4.0*qtemp[:,pad-1:pad+Ny-1]+3.0*qtemp[:,pad:Ny+pad])**2 beta_1 = 13.0/12.0*(qtemp[:,pad-1:pad+Ny-1]-2.0*qtemp[:,pad:pad+Ny]+qtemp[:,pad+1:Ny+pad+1])**2 \ + 1.0/4.0*(qtemp[:,pad-1:pad+Ny-1]-qtemp[:,pad+1:pad+Ny+1])**2 beta_2 = 13.0/12.0*(qtemp[:,pad:pad+Ny]-2.0*qtemp[:,pad+1:pad+Ny+1]+qtemp[:,pad+2:Ny+pad+2])**2 \ + 1.0/4.0*(3.0*qtemp[:,pad:pad+Ny]-4.0*qtemp[:,pad+1:pad+Ny+1]+qtemp[:,pad+2:Ny+pad+2])**2 # nonlinear weights in x alpha_0 = (1.0/10.0)/((beta_0+1.0e-6)**2) alpha_1 = (6.0/10.0)/((beta_1+1.0e-6)**2) alpha_2 = (3.0/10.0)/((beta_2+1.0e-6)**2) # Find nonlinear weights w_0 = (alpha_0/(alpha_0+alpha_1+alpha_2))/6.0 w_1 = (alpha_1/(alpha_0+alpha_1+alpha_2))/6.0 w_2 = (alpha_2/(alpha_0+alpha_1+alpha_2))/6.0 # Find state reconstructions in y - qright (at i+1/2) qyright = w_0*(2.0*qtemp[:,pad-2:pad+Ny-2]-7.0*qtemp[:,pad-1:pad+Ny-1]+11.0*qtemp[:,pad:pad+Ny]) \ + w_1*(-qtemp[:,pad-1:pad+Ny-1]+5.0*qtemp[:,pad:pad+Ny]+2.0*qtemp[:,pad+1:pad+Ny+1]) \ + w_2*(2.0*qtemp[:,pad:pad+Ny]+5.0*qtemp[:,pad+1:pad+Ny+1]-qtemp[:,pad+2:pad+Ny+2]) # Find state reconstructions in y - wave to left (at i+1/2) qyleft = w_0*(2.0*qtemp[:,pad+2:pad+Ny+2]-7.0*qtemp[:,pad+1:pad+Ny+1]+11.0*qtemp[:,pad:pad+Ny]) \ + w_1*(-qtemp[:,pad+1:pad+Ny+1]+5.0*qtemp[:,pad:pad+Ny]+2.0*qtemp[:,pad-1:pad+Ny-1]) \ + w_2*(2.0*qtemp[:,pad:pad+Ny]+5.0*qtemp[:,pad-1:pad+Ny-1]-qtemp[:,pad-2:pad+Ny-2]) qyleft = qyleft[pad:pad+Nx,:] qyright = qyright[pad:pad+Nx,:] return qxleft, qxright, qyleft, qyright def reimann_solve(spec_rad,fl,fr,ql,qr,dim): # Rusanov reimann solver pad = 3 srt = periodic_bc(spec_rad,pad) if dim == 'x': srt = 
np.maximum.reduce([srt[pad-3:Nx+pad-3,pad:Ny+pad],srt[pad-2:Nx+pad-2,pad:Ny+pad],srt[pad-1:Nx+pad-1,pad:Ny+pad],\ srt[pad:Nx+pad,pad:Ny+pad],srt[pad+1:Nx+pad+1,pad:Ny+pad],srt[pad+2:Nx+pad+2,pad:Ny+pad],srt[pad+3:Nx+pad+3,pad:Ny+pad]]) flux = 0.5*(fr+fl) + 0.5*srt*(qr+ql) return flux else: srt = np.maximum.reduce([srt[pad:Nx+pad,pad-3:Ny+pad-3],srt[pad:Nx+pad,pad-2:Ny+pad-2],srt[pad:Nx+pad,pad-1:Ny+pad-1],\ srt[pad:Nx+pad,pad:Ny+pad],srt[pad:Nx+pad,pad+1:Ny+pad+1],srt[pad:Nx+pad,pad+2:Ny+pad+2],srt[pad:Nx+pad,pad+3:Ny+pad+3]]) flux = 0.5*(fr+fl) + 0.5*srt*(qr+ql) return flux def periodic_bc(q,pad): qtemp = np.zeros(shape=(q.shape[0]+2*pad,q.shape[1]+2*pad),dtype='double') # Periodicity updates qtemp[pad:Nx+pad,pad:Ny+pad] = q[:,:] # x direction periodicity qtemp[0:pad,:] = qtemp[Nx-pad:Nx,:] qtemp[Nx+pad:,:] = qtemp[pad:2*pad,:] # y direction periodicity qtemp[:,0:pad] = qtemp[:,Ny-pad:Ny] qtemp[:,Ny+pad:] = qtemp[:,pad:2*pad] return qtemp def spectral_radius(q1,q2): sound_speed = 2.0*np.sqrt(q1/rho*grav) u = q2/q1 return np.maximum.reduce([np.abs(u+sound_speed),np.abs(u-sound_speed),\ np.abs(sound_speed)]) def flux_reconstruction(q1,q2,q3): spec_rad_x = spectral_radius(q1,q2) spec_rad_y = spectral_radius(q1,q3) q1xleft, q1xright, q1yleft, q1yright = state_reconstruction(q1,Nx,Ny) q2xleft, q2xright, q2yleft, q2yright = state_reconstruction(q2,Nx,Ny) q3xleft, q3xright, q3yleft, q3yright = state_reconstruction(q3,Nx,Ny) # Reconstructing fluxes for q1 f1xleft = np.copy(q2xleft) f1xright = np.copy(q2xright) f1x = reimann_solve(spec_rad_x,f1xleft,f1xright,q1xleft,q1xright,'x') f1yleft = np.copy(q3yleft) f1yright = np.copy(q3yright) f1y = reimann_solve(spec_rad_y,f1yleft,f1yright,q1yleft,q1yright,'y') # Reconstructing fluxes for q2 f2xleft = (q2xleft**2)/(q1xleft) + 0.5*(q1xleft**2)*(grav/rho) f2xright = (q2xright**2)/(q1xright) + 0.5*(q1xright**2)*(grav/rho) f2x = reimann_solve(spec_rad_x,f1xleft,f2xright,q2xleft,q2xright,'x') f2yleft = (q2yleft*q3yleft/q1yleft) 
f2yright = (q2yright*q3yright/q1yright) f2y = reimann_solve(spec_rad_y,f2yleft,f2yright,q2yleft,q2yright,'y') # Reconstructing fluxes for q3 f3xleft = (q2xleft*q3xleft/q1xleft) f3xright = (q2xright*q3xright/q1xright) f3x = reimann_solve(spec_rad_x,f3xleft,f3xright,q3xleft,q3xright,'x') f3yleft = (q3yleft**2)/(q1yleft) + 0.5*(q1yleft**2)*(grav/rho) f3yright = (q3yright**2)/(q1yright) + 0.5*(q1yright**2)*(grav/rho) f3y = reimann_solve(spec_rad_y,f3yleft,f3yright,q3yleft,q3yright,'y') return f1x, f1y, f2x, f2y, f3x, f3y # Plotting functions def plot_coefficients(Ytilde): fig,ax = plt.subplots(nrows=1,ncols=4) ax[0].plot(Ytilde[0,:],label='Mode 1') ax[1].plot(Ytilde[1,:],label='Mode 2') ax[2].plot(Ytilde[2,:],label='Mode 3') ax[3].plot(Ytilde[3,:],label='Mode 4') plt.legend() plt.show() def plot_fields_debug(X,Y,q,label,iter): fig = plt.figure(figsize = (11, 7)) ax = Axes3D(fig) surf = ax.plot_surface(X, Y, q, rstride = 1, cstride = 1, cmap = plt.cm.jet, linewidth = 0, antialiased = True) ax.set_title('Visualization', fontname = "serif", fontsize = 17) ax.set_xlabel("x [m]", fontname = "serif", fontsize = 16) ax.set_ylabel("y [m]", fontname = "serif", fontsize = 16) if label == 'q1': ax.set_zlim((0,2)) elif label == 'q2': ax.set_zlim((-1,1)) else: ax.set_zlim((-1,1)) plt.savefig(label+'_'+str(iter)+'.png') # Shallow water equations class class shallow_water(object): """docstring for ClassName""" def __init__(self,args=[0,0]): self.Nx = Nx self.Ny = Ny self.Lx = Lx self.Ly = Ly x = np.linspace(-self.Lx/2, self.Lx/2, self.Nx) # Array with x-points y = np.linspace(-self.Ly/2, self.Ly/2, self.Ny) # Array with y-points # Meshgrid for plotting self.X, self.Y = np.meshgrid(x, y) # Initialize fields self.initialize(args) # Field storage for viz self.q_list = [] # Plot interval self.plot_interval = num_steps_per_plot # Field storage for ROM self.snapshots_pod = [] # at plot interval def initialize(self,args=[0,0]): loc_x = args[0] loc_y = args[1] # There are three conserved 
quantities - initialize self.q1 = 1.0+(rho*np.exp(-((self.X-loc_x)**2/(2*(0.05)**2) + (self.Y-loc_y)**2/(2*(0.05)**2)))) self.q2 = np.zeros(shape=(self.Nx,self.Ny),dtype='double') self.q3 = np.zeros(shape=(self.Nx,self.Ny),dtype='double') def right_hand_side(self,q1,q2,q3): f1x, f1y, f2x, f2y, f3x, f3y = flux_reconstruction(q1,q2,q3) # these are all i+1/2 # Periodicity pad = 1 f1xtemp = periodic_bc(f1x,pad) f1ytemp = periodic_bc(f1y,pad) f2xtemp = periodic_bc(f2x,pad) f2ytemp = periodic_bc(f2y,pad) f3xtemp = periodic_bc(f3x,pad) f3ytemp = periodic_bc(f3y,pad) r1 = 1.0/dx*(f1xtemp[pad:Nx+pad,pad:Ny+pad]-f1xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f1ytemp[pad:Nx+pad,pad:Ny+pad]-f1ytemp[pad:Nx+pad,pad-1:Ny+pad-1]) r2 = 1.0/dx*(f2xtemp[pad:Nx+pad,pad:Ny+pad]-f2xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f2ytemp[pad:Nx+pad,pad:Ny+pad]-f2ytemp[pad:Nx+pad,pad-1:Ny+pad-1]) r3 = 1.0/dx*(f3xtemp[pad:Nx+pad,pad:Ny+pad]-f3xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f3ytemp[pad:Nx+pad,pad:Ny+pad]-f3ytemp[pad:Nx+pad,pad-1:Ny+pad-1]) return -r1, -r2, -r3 def integrate_rk(self): # Equally spaced time integration q1temp = np.copy(self.q1) q2temp = np.copy(self.q2) q3temp = np.copy(self.q3) r1_k1, r2_k1, r3_k1 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign q1temp[:,:] = self.q1[:,:] + dt*(r1_k1[:,:]) q2temp[:,:] = self.q2[:,:] + dt*(r2_k1[:,:]) q3temp[:,:] = self.q3[:,:] + dt*(r3_k1[:,:]) r1_k2, r2_k2, r3_k2 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign q1temp[:,:] = self.q1[:,:] + 0.125*dt*r1_k1[:,:] + 0.125*dt*r1_k2[:,:] q2temp[:,:] = self.q2[:,:] + 0.125*dt*r2_k1[:,:] + 0.125*dt*r2_k2[:,:] q3temp[:,:] = self.q3[:,:] + 0.125*dt*r3_k1[:,:] + 0.125*dt*r3_k2[:,:] r1_k3, r2_k3, r3_k3 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign self.q1[:,:] = self.q1[:,:] + (1.0/6.0)*dt*r1_k1[:,:] + (1.0/6.0)*dt*r1_k2[:,:] + (2.0/3.0)*dt*r1_k3[:,:] self.q2[:,:] = self.q2[:,:] + (1.0/6.0)*dt*r2_k1[:,:] + (1.0/6.0)*dt*r2_k2[:,:] + 
(2.0/3.0)*dt*r2_k3[:,:] self.q3[:,:] = self.q3[:,:] + (1.0/6.0)*dt*r3_k1[:,:] + (1.0/6.0)*dt*r3_k2[:,:] + (2.0/3.0)*dt*r3_k3[:,:] def solve(self): self.t = 0 plot_iter = 0 save_iter = 0 # Save initial conditions flattened_data = np.concatenate((self.q1.flatten(),self.q2.flatten(),self.q3.flatten()),axis=0) self.snapshots_pod.append(flattened_data) while self.t < ft: print('Time is:',self.t) self.t = self.t + dt self.integrate_rk() if plot_iter == self.plot_interval: # Save snapshots flattened_data = np.concatenate((self.q1.flatten(),self.q2.flatten(),self.q3.flatten()),axis=0) self.snapshots_pod.append(flattened_data) if plot_viz: plot_fields_debug(self.X,self.Y,self.q1,'q1',save_iter) plot_iter = 0 save_iter = save_iter + 1 plot_iter = plot_iter + 1 print('Solution finished') class shallow_water_rom(object): def __init__(self,snapshot_matrix_pod,snapshot_matrix_test): """ K - number of POD DOF for GP snapshot_matrix_pod - At snapshot location """ self.K = K self.q1_snapshot_matrix_pod = snapshot_matrix_pod[:Nx*Ny,:] self.q2_snapshot_matrix_pod = snapshot_matrix_pod[Nx*Ny:2*Nx*Ny,:] self.q3_snapshot_matrix_pod = snapshot_matrix_pod[2*Nx*Ny:,:] self.q1_snapshot_matrix_test = snapshot_matrix_test[:Nx*Ny,:] self.q2_snapshot_matrix_test = snapshot_matrix_test[Nx*Ny:2*Nx*Ny,:] self.q3_snapshot_matrix_test = snapshot_matrix_test[2*Nx*Ny:,:] # Plot interval self.plot_interval = num_steps_per_plot # Plot related self.Nx = Nx self.Ny = Ny self.Lx = Lx self.Ly = Ly x = np.linspace(-self.Lx/2, self.Lx/2, self.Nx) # Array with x-points y = np.linspace(-self.Ly/2, self.Ly/2, self.Ny) # Array with y-points # Meshgrid for plotting self.X, self.Y = np.meshgrid(x, y) def method_of_snapshots(self,snapshot_matrix_pod,snapshot_matrix_test): """ Read snapshot_matrix (field or nonlinear term) and compute the POD bases and coefficients snapshot_matrix_pod - N x S - where N is DOF, S snapshots V - truncated POD basis matrix - shape: NxK - K is truncation number Ytilde - shape: KxS - POD 
basis coefficients for train data Ytilde_test - shape: KxS - POD basis coefficients for test data """ new_mat = np.matmul(np.transpose(snapshot_matrix_pod),snapshot_matrix_pod) w,v = np.linalg.eig(new_mat) # Bases V = np.real(np.matmul(snapshot_matrix_pod,v)) trange = np.arange(np.shape(V)[1]) V[:,trange] = V[:,trange]/np.sqrt(w[:]) # Truncate phis V = V[:,0:self.K] # Columns are modes # Find POD coefficients Ytilde = np.matmul(np.transpose(V),snapshot_matrix_pod) Ytilde_test = np.matmul(np.transpose(V),snapshot_matrix_test) return w, V, Ytilde, Ytilde_test def svd_method(self,snapshot_matrix_pod): """ Read snapshot_matrix (field or nonlinear term) and compute the POD bases and coefficients snapshot_matrix_pod - N x S - where N is DOF, S snapshots V - truncated POD basis matrix - shape: NxK - K is truncation number Ytilde - shape: KxS - POD basis coefficients """ phi, S, Vt = np.linalg.svd(snapshot_matrix_pod) Ytilde = np.matmul(phi.T[:,truncation],snapshot_matrix) Ytilde_test = np.matmul(phi.T[:,truncation],snapshot_matrix_test) return S, phi.T[:,self.K], Ytilde, Ytilde_test def generate_pod(self): # Do the POD of the conserved variables self.q1_w, self.q1_V, self.q1_Ytilde, self.q1_Ytilde_test = self.method_of_snapshots(self.q1_snapshot_matrix_pod,self.q1_snapshot_matrix_test) self.q2_w, self.q2_V, self.q2_Ytilde, self.q2_Ytilde_test = self.method_of_snapshots(self.q2_snapshot_matrix_pod,self.q2_snapshot_matrix_test) self.q3_w, self.q3_V, self.q3_Ytilde, self.q3_Ytilde_test = self.method_of_snapshots(self.q3_snapshot_matrix_pod,self.q3_snapshot_matrix_test) # Print captured energy - using definition in https://arxiv.org/pdf/1308.3276.pdf print('Capturing ',np.sum(self.q1_w[0:self.K])/np.sum(self.q1_w),'% variance in conserved variable 1') print('Capturing ',np.sum(self.q2_w[0:self.K])/np.sum(self.q2_w),'% variance in conserved variable 2') print('Capturing ',np.sum(self.q3_w[0:self.K])/np.sum(self.q3_w),'% variance in conserved variable 3') 
np.save('PCA_Vectors_q1.npy',self.q1_V) # The POD bases np.save('PCA_Vectors_q2.npy',self.q2_V) np.save('PCA_Vectors_q3.npy',self.q3_V) np.save('PCA_Coefficients_q1_train.npy',self.q1_Ytilde) # The true projection np.save('PCA_Coefficients_q2_train.npy',self.q2_Ytilde) np.save('PCA_Coefficients_q3_train.npy',self.q3_Ytilde) np.save('PCA_Coefficients_q1_test.npy',self.q1_Ytilde_test) # The true projection np.save('PCA_Coefficients_q2_test.npy',self.q2_Ytilde_test) np.save('PCA_Coefficients_q3_test.npy',self.q3_Ytilde_test) def load_pregenerated_pod(self): self.q1_V = np.load('PCA_Vectors_q1.npy') # The POD bases self.q2_V = np.load('PCA_Vectors_q2.npy') self.q3_V = np.load('PCA_Vectors_q3.npy') self.q1_Ytilde = np.load('PCA_Coefficients_q1_train.npy') # The true projection self.q2_Ytilde = np.load('PCA_Coefficients_q2_train.npy') self.q3_Ytilde = np.load('PCA_Coefficients_q3_train.npy') self.q1_Ytilde_test = np.load('PCA_Coefficients_q1_test.npy') # The true projection self.q2_Ytilde_test = np.load('PCA_Coefficients_q2_test.npy') self.q3_Ytilde_test = np.load('PCA_Coefficients_q3_test.npy') def plot_reconstruction_error(self): fig,ax = plt.subplots(ncols=3) ax[0].plot(self.q1_w[:]/np.sum(self.q1_w)) ax[1].plot(self.q2_w[:]/np.sum(self.q2_w)) ax[2].plot(self.q3_w[:]/np.sum(self.q3_w)) plt.show() def solve(self): from time import time num_test = int(num_samples-num_train) self.q1_snapshots =
np.copy(self.q1_Ytilde_test)
numpy.copy
import os import cv2 import numpy as np import os import shutil import time import random import math import functools def _find_minrect(img, image_name, output_dir=None, debug_type=0, thresh_x = 120, morphology = False, channel='all', overlapthresh=.3): # param@debug_type:0,not debug; 1,store bbox file; 2,store middle caculate file; 3,show window source = img.copy() # step1: blur image max_area = source.shape[0] * source.shape[1] # Apply gaussian blur to the grayscale image # blur = cv2.pyrMeanShiftFiltering(source, 31, 91) sharpen = source # blur = cv2.pyrMeanShiftFiltering(source, 21, 51) # kernel_sharpen = np.array([[-1,-1,-1,-1,-1], # [-1,2,2,2,-1], # [-1,2,8,2,-1], # [-2,2,2,2,-1], # [-1,-1,-1,-1,-1]])/8.0 # kernel_sharpen = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) # sharpen = cv2.filter2D(sharpen, -1, kernel_sharpen) if channel == 'all': sharpen = cv2.cvtColor(sharpen, cv2.COLOR_BGR2GRAY) else: b, g, r = cv2.split(sharpen) if channel == 'b': sharpen = b elif channel == 'g': sharpen = g elif channel == 'r': sharpen = r else: sharpen = cv2.cvtColor(sharpen, cv2.COLOR_BGR2GRAY) # 双向滤波比较不错 # blur = cv2.bilateralFilter(blur, 3, 30, 30) # blur = cv2.split(blur)[0] # blur = cv2.equalizeHist(blur) # blur = cv2.GaussianBlur(blur, (5, 5), 0) if debug_type>1: sharpen_path = os.path.join(output_dir, channel+'_'+'sharpen_'+image_name) cv2.imwrite(sharpen_path, sharpen) # step2: sobel caculate edges # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) x = cv2.Sobel(sharpen, cv2.CV_64F, 1, 0, ksize=-1) y = cv2.Sobel(sharpen, cv2.CV_64F, 0, 1, ksize=-1) edges = cv2.subtract(x, y) edges = cv2.convertScaleAbs(edges) # absX = cv2.convertScaleAbs(x) # 转回uint8 # absY = cv2.convertScaleAbs(y) # # edges = cv2.addWeighted(absX, 0.5, absY, 0.5, 0) # edges = cv2.bilateralFilter(edges, 5, 75, 75) # edges = cv2.GaussianBlur(edges, (5, 5), 0) # edges = cv2.dilate(edges, kernel) # edges = cv2.dilate(edges, kernel) # edges = cv2.dilate(edges, kernel) # edges = 
cv2.erode(edges, kernel) # edges = cv2.erode(edges, kernel) # edges = cv2.erode(edges, kernel) # edges = cv2.GaussianBlur(edges, (9, 9),0) if debug_type>1: edges_path = os.path.join(output_dir, channel+'_'+'edges_'+image_name) cv2.imwrite(edges_path, edges) # step3: binary edges _, thresh1 = cv2.threshold(edges, thresh_x, 255, cv2.THRESH_BINARY) thresh2 = thresh1 # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) # thresh2 = cv2.erode(thresh2, kernel) if morphology: kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) thresh2 = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel) # thresh2 = cv2.dilate(thresh2, kernel) # thresh2 = cv2.dilate(thresh2, kernel) # thresh2 = cv2.dilate(thresh2, kernel) # thresh2 = cv2.dilate(thresh2, kernel) # thresh2 = cv2.dilate(thresh2, kernel) # thresh2 = cv2.erode(thresh2, kernel) # thresh = cv2.GaussianBlur(thresh, (3, 3), 0) # _, thresh = cv2.threshold(gray, x, 255, cv2.THRESH_BINARY_INV) # thresh = cv2.GaussianBlur(thresh, (5, 5), 0) if debug_type>1: thresh1_path = os.path.join(output_dir, channel+'_'+'thresh1_'+image_name) cv2.imwrite(thresh1_path, thresh1) if morphology: thresh2_path = os.path.join(output_dir, channel+'_'+'thresh2_' + image_name) cv2.imwrite(thresh2_path, thresh2) # Find the edges # edges = cv2.Canny(gray,x1,x2) # edges = gray # step4: Detect contours _, contours, _ = cv2.findContours(thresh2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) print('find contours: {}'.format(len(contours))) # print('first contour: {}'.format(contours[0])) # step5: contour filter with area area_to_contour = {} for cnt in contours: cnt = cv2.convexHull(cnt, returnPoints=True) leftmost = cnt[cnt[:, :, 0].argmin()][0][0] rightmost = cnt[cnt[:, :, 0].argmax()][0][0] topmost = cnt[cnt[:, :, 1].argmin()][0][1] bottommost = cnt[cnt[:, :, 1].argmax()][0][1] # print('%d,%d,%d,%d' %(leftmost,rightmost,topmost,bottommost)) # return area = (bottommost-topmost) * (rightmost-leftmost) if area < max_area/100: # 去除面积过小的物体 continue # if 
area > max_area*.9: # 去除面积过大的物体 # continue area_to_contour[area] = cnt # print(tuple(cnt[cnt[:, :, 0].argmin()][0])) # print(tuple(cnt[cnt[:, :, 0].argmax()][0])) # step6: caculate bounding box and draw contours drawing_contours = np.zeros(source.shape, np.uint8) areas = sorted(area_to_contour, reverse=True) index = 0 min_rectes = [] for area in areas: index += 1 # if index > top_n: # break cnt = area_to_contour[area] color = np.random.randint(0, 255, (3)).tolist() # Select a random color if debug_type > 1: cv2.drawContours(drawing_contours, [cnt], 0, color, 1) min_rect = cv2.minAreaRect(cnt) min_rectes.append(min_rect) # if debug_type > 1: # drawing_contours = cv2.rectangle(drawing_contours, (x, y), (x + w, y + h), (0, 255, 0), 2) if debug_type>1: contours_path = os.path.join(output_dir, channel+'_'+'contours_'+image_name) cv2.imwrite(contours_path, drawing_contours) # step7: nms min rect # min_rectes = _non_max_suppression_minrect(min_rectes, .3) if debug_type > 1 and len(min_rectes) > 0: minrect = np.copy(source) for min_rect in min_rectes: points = cv2.boxPoints(min_rect) points = np.int0(points) minrect = cv2.drawContours(minrect,[points],0,(0, 0, 255),1) minrect_path = os.path.join(output_dir, channel+'_'+'minrect_'+image_name) cv2.imwrite(minrect_path, minrect) if debug_type>2: cv2.imshow(channel+'_'+'input', sharpen) cv2.imshow(channel+'_'+'edges', edges) cv2.imshow(channel+'_'+'thresh1', thresh1) if morphology: cv2.imshow(channel+'_'+'thresh2', thresh2) cv2.imshow(channel+'_'+'drawing_contours', drawing_contours) return min_rectes class Point(object): def __init__(self, x, y): self.x = x self.y = y def __str__(self): return '[{},{}]'.format(self.x,self.y) def cmp(a, b, c): if a.x-c.x >= 0 and b.x-c.x < 0: return -1 if a.x-c.x == 0 and b.x-c.x == 0: # return a.y > b.y if a.y > b.y: return -1 elif a.y < b.y: return 1 return 0 det = (a.x - c.x) * (b.y - c.y) - (b.x - c.x) * (a.y - c.y) if det < 0: return 1 if det > 0: return -1 d1 = (a.x - c.x) * (a.x - c.x) 
+ (a.y - c.y) * (a.y - c.y) d2 = (b.x - c.x) * (b.x - c.x) + (b.y - c.y) * (b.y - c.y) # return d1 > d2 if d1 > d2: return -1 elif d1 < d2: return 1 return 0 def _rotated_rectangle_intersection_area(s_rect,m_rect,debug=False): r1 = cv2.rotatedRectangleIntersection(s_rect, m_rect) if r1[0] == 0: return 0, None elif r1[0] == 2: return s_rect[1][0]*s_rect[1][1], None x = 0 y = 0 p = [] len_p = r1[1].shape[0] for i in range(len_p): p.append(Point(r1[1][i][0][0], r1[1][i][0][1])) x += r1[1][i][0][0] y += r1[1][i][0][1] c = Point(x / len_p, y / len_p) if debug: print('source:{}'.format(''.join(map(str,p)))) pp = sorted(p, key=functools.cmp_to_key(lambda x, y: cmp(x, y, c))) if debug: print('sorted:{}'.format(''.join(map(str,pp)))) r =
np.full((len_p, 2), 0.0, dtype='float32')
numpy.full
from __future__ import division import os import numpy as np from fdint import fdk, ifd1h from ifg.units_converter import SiAtomicConverter from ifg.utils import dump_to_csv THRESHOLD = 1e10 def _1d_call(func, array, *args, **kwargs): return func(array.reshape(-1), *args, **kwargs).reshape(array.shape) def _fdk(array, k): return fdk(k, array) def get_chemical_potential(vv, tt, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG chemical potential mu in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: `mu[i][j]` - chemical potential in atomic units. *i*-th index is for temperature, *j*-th one is for volume """ to_inverse = np.sqrt(2) * np.pi ** 2 / (gbar * tt ** (1.5) * vv) mu_div_temperature = _1d_call(ifd1h, to_inverse) mu = mu_div_temperature * tt # mu = np.multiply(temperature, mu_div_temperature.T).T return mu def get_F_potential(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG Helmholtz potential F in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :param chemical_potential: Chemical potential in atomic units. :return: F[i][j] - Helmholtz free energy in atomic units. *i*-th index is for temperature, *j*-th one is for volume """ # y = chemical_potential/temperature y = chemical_potential / tt F = gbar / np.sqrt(2.0) / np.pi ** 2 * tt ** (2.5) * vv F *= y * _1d_call(_fdk, y, k=0.5) - 2.0 / 3.0 * _1d_call(_fdk, y, k=1.5) return F def get_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG pressure P in atomic units. :param vv: Matrix of specific volumes in atomic units. 
:param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: P[i][j] - Pressure in atomic units. *i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt pressure = ( gbar * np.sqrt(2) / (3 * np.pi ** 2) * tt ** (2.5) * _1d_call(_fdk, y, k=1.5) ) return pressure def get_energy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG energy E in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: E[i][j] - Energy in atomic units. *i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt energy = ( gbar * vv / (np.sqrt(2) * np.pi ** 2) * tt ** 2.5 * _1d_call(_fdk, y, k=1.5) ) return energy def get_entropy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG entropy S in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: S[i][j] - Entropy in atomic units. 
*i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt # There is a precision problem with "-" (minus) operator # We'll use asymptotic formula for low temperatures to avoid that problem y_low = y[y < THRESHOLD] vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD] tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD] # high temperatures - low numbers S_low = ( -gbar * np.sqrt(2) / (6 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low * ( 3 * y_low * _1d_call(_fdk, y_low, k=1 / 2) - 5 * _1d_call(_fdk, y_low, k=3 / 2) ) ) # low temperatures - high numbers S_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3) return np.concatenate((S_low, S_high)).reshape(y.shape) def get_heat_capacity_volume(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG heat capacity C_V in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: C_V[i][j] - C_V in atomic units. 
*i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt # There is a precision problem with "-" (minus) operator # We'll use asymptotic formula for high temperatures to avoid that problem y_low = y[y < THRESHOLD] vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD] tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD] # high temperatures - low numbers C_V_low = 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2) C_V_low -= 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2 C_V_low *= gbar * np.sqrt(2) / (4 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low C_V_low /= _1d_call(_fdk, y_low, k=-1 / 2) # low temperatures - high numbers C_V_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3) return np.concatenate((C_V_low, C_V_high)).reshape(y.shape) def get_heat_capacity_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG heat capacity C_P in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: C_P[i][j] - C_P in atomic units. 
*i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt # There is a precision problem with "-" (minus) operator # We'll use asymptotic formula for high temperatures to avoid that problem y_low = y[y < THRESHOLD] vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD] tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD] # high temperatures - low numbers C_P_low = 5 * gbar * np.sqrt(2) / (36 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low C_P_low *= ( 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2) - 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2 ) C_P_low *= _1d_call(_fdk, y_low, k=3 / 2) / _1d_call(_fdk, y_low, k=1 / 2) ** 2 # low temperatures - high numbers C_P_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3) return np.concatenate((C_P_low, C_P_high)).reshape(y.shape) def get_sound_speed_temperature(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG sound speed C_T in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. :param gbar: degeneracy factor, for IFG g = 2s + 1 :return: C_T[i][j] - C_T in atomic units. *i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt C_T = ( 2 ** (1 / 4) * np.sqrt(gbar) / np.pi * np.sqrt(vv) * tt ** (5 / 4) * _1d_call(_fdk, y, k=1 / 2) / np.sqrt(_1d_call(_fdk, y, k=-1 / 2)) ) return C_T def get_sound_speed_entropy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs): # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray """Get IFG sound speed C_S in atomic units. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param chemical_potential: Chemical potential in atomic units. 
:param gbar: degeneracy factor, for IFG g = 2s + 1 :return: C_S[i][j] - C_S in atomic units. *i*-th index is for temperature, *j*-th one is for volume """ y = chemical_potential / tt C_S = ( np.sqrt(5) * np.sqrt(gbar) * 2 ** (1 / 4) / (3 * np.pi) * tt ** (5 / 4) * np.sqrt(vv * _1d_call(_fdk, y, k=3 / 2)) ) return C_S def get_all_properties(vv, tt, gbar=2.0, csv_dir=None): # type: (np.ndarray, np.ndarray, float, str) -> dict """Calculate all properties and save them to csv file. :param vv: Matrix of specific volumes in atomic units. :param tt: Matrix of temperatures in atomic units. :param vv: Specific volume in atomic units :param tt: Temperature in atomic units :param gbar: degeneracy factor, for IFG g = 2s + 1 :param csv_dir: Directory to save csv files to :return: dict {'property_name': ndarray} """ properties = dict( mu=get_chemical_potential, F=get_F_potential, p=get_pressure, S=get_entropy, C_P=get_heat_capacity_pressure, C_V=get_heat_capacity_volume, C_T=get_sound_speed_temperature, C_S=get_sound_speed_entropy, ) for key in properties.keys(): properties[key] = properties[key]( vv=vv, tt=tt, gbar=gbar, chemical_potential=properties["mu"] ) if csv_dir: for i, volume in enumerate(vv[0, :]): dump_to_csv( os.path.join( os.getcwd(), csv_dir, "{}_v={}_atomic_units.csv".format(key, volume), ), np.array([tt[0, :], properties[key][:, i]]).T, ) return properties class IfgCalculator: def __init__( self, temperatures=None, volumes=None, thetas=None, densities=None, rs=None, input_in_si=None, output_in_si=None, g=None, mr=None, ): # def __init__(self, specific_volumes, temperatures, # input_in_si, output_in_si, g=2., mr=1.): # type: (np.ndarray, np.ndarray, bool, bool, float, float) -> None """Main class for IFG calculations. 
:param volumes, rs, densities: Array of volumes, rs or densities, respectively (only one parameter is possible) :param temperatures, thetas: Array of temperatures or thetas, respectively (only one parameter is possible; in case of thetas the length of thetas array should be not more than 1) :param input_in_is: Whether input values are in SI units (False - atomic units, default) :param output_in_si: Whether output values are in SI units (False - atomic units, default) :param g: degeneracy of spin states, g = 2s + 1, s - spin, g = 2 by default :param mr: mass of particles with respect to electron mass, mr = 1 by default """ # Default values input_in_si_default = False output_in_si_default = False g_default = 2.0 mr_default = 1.0 # Checking if temperatures or thetas argument is given if temperatures is None and thetas is None: raise ValueError("temperatures or thetas parameter is obligatory") # Checking if both temperatures and thetas arguments are given if temperatures is not None and thetas is not None: raise ValueError( "Only one named parameter must be used for temperature: temperatures or thetas" ) # Checking if any of volumes or densities of rs argument is given if volumes is None and densities is None and rs is None: raise ValueError( "One of volumes or densities or rs parameter is obligatory" ) # Cannot have more than one argument if sum([x is not None for x in (volumes, densities, rs)]) > 1: raise ValueError( "Only one named parameter must be used for volume: volumes or densities or rs" ) # If volumes argument is given, simply convert to np.ndarray if volumes is not None: volumes = np.array(volumes) # If densities argument is given, calculate volumes if densities is not None: volumes = 1.0 / np.array(densities) # If rs argument is given, calculate volumes if rs is not None: volumes = 4.0 * np.pi * np.array(rs) ** 3 / 3.0 # If temperatures argument is given, simply convert to np.ndarray if temperatures is not None: temperatures =
np.array(temperatures)
numpy.array
import numpy as np from sklearn.cross_decomposition import PLSRegression from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import cross_val_predict from sklearn.model_selection import cross_val_score from sklearn.utils import shuffle from numpy.linalg import matrix_rank as rank # Single step feature selection method class MCUVE: def __init__(self, x, y, ncomp=1, nrep=500, testSize=0.2): self.x = x self.y = y # The number of latent components should not be larger than any dimension size of independent matrix self.ncomp = min([ncomp, rank(x)]) self.nrep = nrep self.testSize = testSize self.criteria = None self.featureIndex = None self.featureR2 = np.full(self.x.shape[1], np.nan) self.selFeature = None def calcCriteria(self): PLSCoef = np.zeros((self.nrep, self.x.shape[1])) ss = ShuffleSplit(n_splits=self.nrep, test_size=self.testSize) step = 0 for train, test in ss.split(self.x, self.y): xtrain = self.x[train, :] ytrain = self.y[train] plsModel = PLSRegression(min([self.ncomp, rank(xtrain)])) plsModel.fit(xtrain, ytrain) PLSCoef[step, :] = plsModel.coef_.T step += 1 meanCoef = np.mean(PLSCoef, axis=0) stdCoef = np.std(PLSCoef, axis=0) self.criteria = meanCoef / stdCoef def evalCriteria(self, cv=3): self.featureIndex = np.argsort(-np.abs(self.criteria)) for i in range(self.x.shape[1]): xi = self.x[:, self.featureIndex[:i + 1]] if i<self.ncomp: regModel = LinearRegression() else: regModel = PLSRegression(min([self.ncomp, rank(xi)])) cvScore = cross_val_score(regModel, xi, self.y, cv=cv) self.featureR2[i] = np.mean(cvScore) def cutFeature(self, *args): cuti = np.argmax(self.featureR2) self.selFeature = self.featureIndex[:cuti+1] if len(args) != 0: returnx = list(args) i = 0 for argi in args: if argi.shape[1] == self.x.shape[1]: returnx[i] = argi[:, self.selFeature] i += 1 return tuple(returnx) class RT(MCUVE): def calcCriteria(self): # calculate 
normal pls regression coefficient plsmodel0=PLSRegression(self.ncomp) plsmodel0.fit(self.x, self.y) # calculate noise reference regression coefficient plsCoef0=plsmodel0.coef_ PLSCoef = np.zeros((self.nrep, self.x.shape[1])) for i in range(self.nrep): randomidx = list(range(self.x.shape[0])) np.random.shuffle(randomidx) ytrain = self.y[randomidx] plsModel = PLSRegression(self.ncomp) plsModel.fit(self.x, ytrain) PLSCoef[i, :] = plsModel.coef_.T plsCoef0 = np.tile(np.reshape(plsCoef0, [1, -1]), [ self.nrep, 1]) criteria = np.sum(np.abs(PLSCoef) > np.abs(plsCoef0), axis=0)/self.nrep self.criteria = criteria def evalCriteria(self, cv=3): # Note: small P value indicating important feature self.featureIndex = np.argsort(self.criteria) for i in range(self.x.shape[1]): xi = self.x[:, self.featureIndex[:i + 1]] if i<self.ncomp: regModel = LinearRegression() else: regModel = PLSRegression(min([self.ncomp, rank(xi)])) cvScore = cross_val_score(regModel, xi, self.y, cv=cv) self.featureR2[i] = np.mean(cvScore) class VC(RT): def calcCriteria(self, cv=3): # calculate normal pls regression coefficient nVar = self.x.shape[1] sampleMatrix = np.ndarray([self.nrep,self.x.shape[1]], dtype=int) sampleMatrix[:, :] = 0 errVector = np.ndarray([self.nrep,1]) # The number of variable in combination should less than the total variable number if nVar > self.ncomp: nSample = max([self.ncomp, nVar//10]) else: nSample = max([1, nVar-1]) sampleidx = range(self.x.shape[1]) for i in range(self.nrep): sampleidx = shuffle(sampleidx) seli = sampleidx[:nSample] plsModel = PLSRegression(n_components=min([self.ncomp, rank(self.x[:, seli])])) plsModel.fit(self.x[:, seli], self.y) sampleMatrix[i, seli] = 1 yhati=cross_val_predict(plsModel, self.x[:, seli], self.y, cv=cv) errVector[i] = np.sqrt(mean_squared_error(yhati, self.y)) plsModel = PLSRegression(n_components=self.ncomp) plsModel.fit(sampleMatrix, errVector) self.criteria = plsModel.coef_.ravel() # Recursive feature selection method class MSVC: def 
__init__(self, x, y, ncomp=1, nrep=7000, ncut=50, testSize=0.2): self.x = x self.y = y # The number of latent components should not be larger than any dimension size of independent matrix self.ncomp = min([ncomp,
rank(x)
numpy.linalg.matrix_rank
# -*- coding: utf-8 -*- """ This code allows us to run classical clustering approaches namely Kmeans, Spherical Kmeans and Auto-encoder """ import numpy as np import pandas as pd from sklearn.utils import check_random_state from coclust import clustering from sklearn.cluster import KMeans from sklearn.mixture import GaussianMixture from coclust.evaluation.external import accuracy from sklearn.metrics.cluster import normalized_mutual_info_score from sklearn.metrics.cluster import adjusted_rand_score from sklearn import metrics import time import random from keras.models import Model from keras.layers import Dense, Input def random_init(n_clusters, n_cols, random_state=None): """Create a random column cluster assignment matrix. Each row contains 1 in the column corresponding to the cluster where the processed data matrix column belongs, 0 elsewhere. Parameters ---------- n_clusters: int Number of clusters n_cols: int Number of columns of the data matrix (i.e. number of rows of the matrix returned by this function) random_state : int or :class:`numpy.RandomState`, optional The generator used to initialize the cluster labels. Defaults to the global numpy random number generator. 
Returns ------- matrix Matrix of shape (``n_cols``, ``n_clusters``) """ if random_state == None: W_a = np.random.randint(n_clusters, size=n_cols) else: random_state = check_random_state(random_state) W_a = random_state.randint(n_clusters, size=n_cols) W = np.zeros((n_cols, n_clusters)) W[np.arange(n_cols), W_a] = 1 return W def purity_score(y_true, y_pred): # compute contingency matrix (also called confusion matrix) contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred) # return purity return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix) global_path = './data/' path_to_save = './results/resultViewClustering/' nom_BDD = ['DBLP1','DBLP2','PubMed_Diabets' ,'classic3', 'classic4', 'ag_news'] nCLusters = [3,3,3,3, 4, 4] nbrIteration = 30 nbSlices = 8 nbAlgorithms = 3 df_results = pd.DataFrame(columns=["Dataset", "Time", "ACC", "NMI", "ARI", "Purity",'Algorithm','View'], index=np.arange(nbrIteration * ((len(nom_BDD) * nbSlices*nbAlgorithms ))).tolist()) cpt = 0 for nb in range(len(nom_BDD)): print('###############################################') print('nom_BDD ', nom_BDD[nb]) bdd_name = nom_BDD[nb] inf_doc = pd.read_csv(global_path+bdd_name+'/' + bdd_name+'.csv', delimiter=',') abstracts = np.asarray(inf_doc['text']).astype(str).tolist() # print(inf_doc) labels_all = np.asarray(inf_doc['label']).astype(int) n_new = inf_doc.shape[0] d_new = inf_doc.shape[1] labels = labels_all[0:n_new] labels = labels.tolist() ################################################################## # hyperparameters # ################################################################## K = nCLusters[nb] print('K ', K) del inf_doc ################################################################## # Load DBLP1 dataset # ################################################################## simBow = np.load(global_path +bdd_name+'/' + 'view_bow' + '.npz') simBow = simBow['arr_0'] print('simBow ', simBow.shape) simBert = np.load(global_path +bdd_name+'/' + 
'view_bert-base-cased' + '.npz') simBert = simBert['arr_0'] simBertLPCA = np.load(global_path +bdd_name+'/' + 'view_avgpca__bert-large-cased' + '.npz') simBertLPCA = simBertLPCA['arr_0'] print('simBertLPCA ', simBertLPCA.shape) simRoBertLPCA = np.load(global_path +bdd_name+'/' + 'view_avgpca__roberta-large' + '.npz') simRoBertLPCA = simRoBertLPCA['arr_0'] simRoBerta = np.load(global_path +bdd_name+'/' + 'view_roberta-large' + '.npz') simRoBerta = simRoBerta['arr_0'] simSentenceRoBerta = np.load(global_path +bdd_name+'/'+ 'sim_sentenceRoberta' +'.npz') simSentenceRoBerta = simSentenceRoBerta['arr_0'] print('simSentenceRoBerta ', simSentenceRoBerta.shape) simGlove = np.load(global_path +bdd_name+'/' + 'view_glove' + '.npz') simGlove = simGlove['arr_0'] print('simGlove ', simGlove.shape) simW2V = np.load(global_path +bdd_name+'/' + 'view_w2v' + '.npz') simW2V = simW2V['arr_0'] simEntity = np.load(global_path +bdd_name+'/' + 'view_entity' + '.npz') simEntity = simEntity['arr_0'] print('simEntity ', simEntity.shape) viewsNames = ['BOW','Bert','BertLPCA','RoBertLPCA', 'SentenceRo', 'GLOVE', 'W2V','Entity'] data = [simBow,simBert,simBertLPCA,simRoBertLPCA,simSentenceRoBerta,simGlove,simW2V,simEntity] print(len(data)) del simBow del simGlove del simW2V del simBert del simRoBerta del simEntity del simBertLPCA del simRoBertLPCA ################################################################## ########################## Version Hard ######################### ################################################################## for v_ in range(len(data)): viewName = viewsNames[v_] data_view = data[v_] print('###############################################') print('viewName ',viewName) for it in range(nbrIteration): random.seed(it) np.random.seed(it) print("iter " + str(it)) ########################################################## print('################### skmeans ######################') Z_init = random_init(K, n_new) colSum= data_view.sum(0) print('colSum',colSum) 
print(np.sum(colSum==0)) start_time = time.time() model = clustering.spherical_kmeans.SphericalKmeans(n_clusters=K, max_iter=100, n_init=1,tol=1e-09, weighting=False) model.fit(data_view) end_time = time.time() phiK = model.labels_ phiK = np.asarray(phiK) time_ = end_time - start_time acc = np.around(accuracy(labels, phiK), 3) nmi = np.around(normalized_mutual_info_score(labels, phiK), 3) ari = np.around(adjusted_rand_score(labels, phiK), 3) purity = np.around(purity_score(labels, phiK), 3) print("Accuracy : ", acc) print("nmi : ", nmi) print("ari : ", ari) print("purity : ", purity) df_results.Algorithm[cpt] = 'Skmeans' df_results.View[cpt] = viewName df_results.Dataset[cpt] = bdd_name df_results.Time[cpt] = str(time_) df_results.ACC[cpt] = str(acc) df_results.NMI[cpt] = str(nmi) df_results.ARI[cpt] = str(ari) df_results.Purity[cpt] = str(purity) cpt = cpt + 1 ########################################################## print('################### kmeans ######################') Z_init = random_init(K, n_new) start_time = time.time() kmeans = KMeans(n_clusters=K, random_state=0).fit(data_view) end_time = time.time() phiK = kmeans.labels_ phiK =
np.asarray(phiK)
numpy.asarray
# This code is part of Mthree. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=no-name-in-module """Test collection classes""" import numpy as np from qiskit import QuantumCircuit, execute from qiskit.test.mock import FakeAthens import mthree def test_mit_overhead(): """Test if mitigation overhead over collection is same as loop """ backend = FakeAthens() qc = QuantumCircuit(5) qc.h(2) qc.cx(2, 1) qc.cx(2, 3) qc.cx(1, 0) qc.cx(3, 4) qc.measure_all() raw_counts = execute([qc]*10, backend).result().get_counts() mit = mthree.M3Mitigation(backend) mit.cals_from_system() mit_counts = mit.apply_correction(raw_counts, qubits=range(5), return_mitigation_overhead=True) ind_overheads = np.asarray([cnt.mitigation_overhead for cnt in mit_counts]) assert
np.allclose(mit_counts.mitigation_overhead, ind_overheads)
numpy.allclose
# -*- coding: utf-8 -*- """ Created on Mon Dec 3 17:27:46 2018 @author: <NAME> Implementation of information representation based multi-layer classifier using GFMM Note: Currently, all samples in the dataset must be normalized to the range of [0, 1] before using this class """ import sys, os sys.path.insert(0, os.path.pardir) import numpy as np import math import ast import time import multiprocessing from functionhelper.bunchdatatype import Bunch from functionhelper.membershipcalc import memberG, asym_similarity_one_many from functionhelper.preprocessinghelper import read_file_in_chunks_group_by_label, read_file_in_chunks, string_to_boolean, loadDataset from functionhelper.hyperboxadjustment import isOverlap, hyperboxOverlapTest, modifiedIsOverlap, hyperboxContraction from concurrent.futures import ProcessPoolExecutor, as_completed def get_num_cpu_cores(): num_cores = multiprocessing.cpu_count() if num_cores >= 4: num_cores = num_cores - 2 return num_cores class Info_Presentation_Multi_Layer_Classifier_GFMM(object): def __init__(self, teta = [0.1, 0.5], gamma = 1, simil_thres = 0.5, oper = 'min'): self.gamma = gamma self.teta_onl = teta[0] self.higher_teta = teta[1:] self.oper = oper self.simil_thres = simil_thres def homogeneous_hyperbox_expansion(self, X_l, X_u, patClassId, current_hyperboxes): """ Expand current hyperboxes to cover input patterns, all input samples have the same label with each other as well as current hyperboxes (if exists) Update the number of patterns contained in the hyperboxes and their centroids of samples INPUT Xl Input data lower bounds (rows = objects, columns = features) Xu Input data upper bounds (rows = objects, columns = features) patClassId Input data class labels (crisp). 
patClassId[i] = 0 corresponds to an unlabeled item current_hyperboxes A list of current hyperboxes in the Bunch datatype (properties: lower, upper, classId, no_pat, centroid) OUTPUT result A bunch data size with lower and upper bounds, class labels of hyperboxes """ yX = X_l.shape[0] V = current_hyperboxes.lower W = current_hyperboxes.upper classId = current_hyperboxes.classId no_Pats = current_hyperboxes.no_pat centroid = current_hyperboxes.centroid # for each input sample for i in range(yX): classOfX = patClassId[i] if V.size == 0: # no model provided - starting from scratch V = np.array([X_l[i]]) W = np.array([X_u[i]]) classId = np.array([patClassId[i]]) no_Pats = np.array([1]) centroid = np.array([(X_l[i] + X_u[i]) / 2]) else: b = memberG(X_l[i], X_u[i], V, W, self.gamma, self.oper) index = np.argsort(b)[::-1] bSort = b[index]; if bSort[0] != 1: adjust = False for j in index: # test violation of max hyperbox size and class labels if ((np.maximum(W[j], X_u[i]) -
np.minimum(V[j], X_l[i])
numpy.minimum
import copy import numpy as np import torch mat = torch.Tensor(np.load('/home/FQuinton/Bureau/data_pse/META/stats.npy')).int() dic = { 0: "Prairie", 1: "Triticale", 2: "Maïs", 3: "Seigle", 4: "Blé", 5: "Colza", 6: "<NAME>", 7: "Tournesol", 8: "Vigne", 9: "Soja", 10: "Sorghum", 11: "Luzerne", 12: "<NAME>", 13: "<NAME>", 14: "<NAME>", 15: "Fleurs fruits legumes", 16: "<NAME>", 17: "<NAME>", 18: "<NAME>", 19: "<NAME>" } mat_norm = torch.zeros((20,20,20)) for i in range(20): for j in range(20): for k in range(20): if(torch.sum(mat[i][j]) != 0): mat_norm[i][j][k] = mat[i][j][k]/torch.sum(mat[i][j]) else : mat_norm[i][j][k] = 0 max_list = [] mat_copy = copy.deepcopy(mat_norm) for i in range(500): val = torch.where(mat_copy == torch.amax(mat_copy)) mat_copy[val[0][0]][val[1][0]][val[2][0]] = -1 if(torch.sum(mat[val[0][0]][val[1][0]]).item() > 9 and round(mat_norm[val[0][0]][val[1][0]][val[2][0]].item(),2) > 0.1): max_list.append([round(mat_norm[val[0][0]][val[1][0]][val[2][0]].item(),2), torch.sum(mat[val[0][0]][val[1][0]]).item(), mat[val[0][0]][val[1][0]][val[2][0]].item(), dic[val[0][0].item()], dic[val[1][0].item()], dic[val[2][0].item()]]) max_list.sort(reverse = True) m = np.array(max_list) m = m[:,2].astype(int) n =
np.sum(m)
numpy.sum
import numpy as np import networkx as nx import itertools from collections import deque from tempfile import NamedTemporaryFile from distutils.spawn import find_executable from subprocess import check_call from xml.etree import cElementTree from ..constants import res, log _METERS_TO_INCHES = 1.0 / .0254 _STEP_FACETER = find_executable('export_product_asm') def load_step(file_obj, file_type=None): ''' Use the STEPtools Inc. Author Tools binary to mesh a STEP file, and return a list of Trimesh objects. Using this over openCASCADE as it is signifigantly more stable (though not OSS.) STEPtools Inc. provides the binary under this license: http://www.steptools.com/demos/license_author.html To install the required binary ('export_product_asm') into PATH: wget http://www.steptools.com/demos/stpidx_author_linux_x86_64_16.0.zip unzip stpidx_author_linux_x86_64_16.0.zip sudo cp stpidx_author_linux_x86_64/bin/export_product_asm /usr/bin/ Arguments ---------- file_obj: file like object containing step file file_type: unused Returns ---------- meshes: list of Trimesh objects (with correct metadata set from STEP file) ''' with NamedTemporaryFile() as out_file: with NamedTemporaryFile(suffix='.STEP') as in_file: if hasattr(file_obj, 'read'): in_file.write(file_obj.read()) in_file.seek(0) file_name = in_file.name else: file_name = file_obj check_call([_STEP_FACETER, file_name, '-tol', str(res.mesh), '-o', out_file.name]) t = cElementTree.parse(out_file) meshes = {} # get the meshes without metadata from the XML document for shell in t.findall('shell'): # query the xml structure for vertices and faces vertices = np.array([v.get('p').split() for v in shell.findall('.//v')], dtype=np.float) faces = np.array([f.get('v').split() for f in shell.findall('.//f')], dtype = np.int) # normals aren't always returned but faces have correct winding # so they are autogenerated correctly from dot products mesh = {'vertices': vertices, 'faces' : faces, 'metadata': {}} # store the mesh by id 
reference meshes[shell.get('id')] = mesh try: # populate the graph of shapes and transforms g = nx.MultiDiGraph() # keys: {mesh id : shape id} mesh_shape = {} # assume that the document has consistant units to_inches = None for shape in t.findall('shape'): shape_id = shape.get('id') shape_unit = shape.get('unit') mesh_id = shape.get('shell') if not shape_unit is None: to_inches = float(shape_unit.split()[1]) * _METERS_TO_INCHES if not mesh_id is None: for i in mesh_id.split(): mesh_shape[i] = shape_id #g.node[shape_id]['mesh'] = mesh_id g.add_node(shape_id, {'mesh' : mesh_id}) for child in shape.getchildren(): child_id = child.get('ref') transform = np.array(child.get('xform').split(), dtype=np.float).reshape((4,4)).T g.add_edge(shape_id, child_id, transform=transform) # which product ID has the root shape prod_root = t.getroot().get('root') shape_root = None for prod in t.findall('product'): prod_id = prod.get('id') prod_name = prod.get('name') prod_shape = prod.get('shape') if prod_id == prod_root: shape_root = prod_shape g.node[prod_shape]['product_name'] = prod_name # now that the assembly tree has been populated, traverse it to # find the final transforms and quantities for the meshes we extracted for mesh_id in meshes.keys(): shape_id = mesh_shape[mesh_id] transforms_all = deque() path_str = deque() if shape_id == shape_root: paths = [[shape_id, shape_id]] else: paths = nx.all_simple_paths(g, shape_root, shape_id) paths = np.array(list(paths)) garbage, unique = np.unique(['.'.join(i) for i in paths], return_index=True) paths = paths[unique] for path in paths: path_name = [g.node[i]['product_name'] for i in path[:-1]] edges = np.column_stack((path[:-1], path[:-1])).reshape(-1)[1:-1].reshape((-1,2)) transforms = [np.eye(4)] for e in edges: # get every transform from the edge local = [i['transform'] for i in g.edge[e[0]][e[1]].values()] # all the transforms are sequential, so we want combinations transforms = [np.dot(*i) for i in itertools.product(transforms, 
local)] transforms_all.extend(transforms) path_str.extend(['/'.join(path_name)]*len(transforms)) meshes[mesh_id]['vertices'] *= to_inches meshes[mesh_id]['metadata']['units'] = 'inches' meshes[mesh_id]['metadata']['name'] = path_name[-1] meshes[mesh_id]['metadata']['paths'] =
np.array(path_str)
numpy.array
#!/usr/bin/env python import sys, os import argparse import numpy as np import pandas as pd import matplotlib.pyplot as plt import mcmc_sampler_sbm from estimate_cluster import estimate_clustering from sklearn.cluster import KMeans from scipy.stats import mode from collections import Counter from matplotlib2tikz import save as tikz_save ## Boolean type for parser def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') ######################################### ## Analysis of the Enron Email Network ## ######################################### ## Import the dataset enron = np.loadtxt('Datasets/enron_edges.txt',dtype=int,delimiter='\t')-1 ###### MODEL ARGUMENTS ## PARSER to give parameter values parser = argparse.ArgumentParser() # Boolean variable for coclustering parser.add_argument("-c","--coclust", type=str2bool, dest="coclust", default=False, const=False, nargs="?",\ help="Boolean variable for coclustering, default FALSE") # Boolean variable to use second level clustering (default True) parser.add_argument("-s","--sord", type=str2bool, dest="second_order_clustering", default=True, const=False, nargs="?",\ help="Boolean variable for second level clustering, default TRUE") # Add options for .tex figures parser.add_argument("-t","--tex", type=str2bool, dest="tex_figures", default=False, const=False, nargs="?",\ help="Boolean variable for .tex figures, default FALSE") # Burnin parser.add_argument("-B","--nburn", type=int, dest="nburn", default=25000, const=True, nargs="?",\ help="Integer: length of burnin, default 25000") # Number of samples parser.add_argument("-M","--nsamp", type=int, dest="nsamp", default=500000, const=True, nargs="?",\ help="Integer: length of MCMC chain after burnin, default 500000") ## Set destination folder for output parser.add_argument("-f","--folder", type=str, dest="dest_folder", 
default="Results", const=True, nargs="?",\ help="String: name of the destination folder for the output files (*** the folder must exist ***)") ## Parse arguments args = parser.parse_args() coclust = args.coclust second_order_clustering = args.second_order_clustering tex_figures = args.tex_figures nburn = args.nburn nsamp = args.nsamp dest_folder = args.dest_folder # Create output directory if doesn't exist if dest_folder != '' and not os.path.exists(dest_folder): os.mkdir(dest_folder) ## Create the adjacency matrix n = np.max(enron)+1 A = np.zeros((n,n)) for link in enron: A[link[0],link[1]] = 1.0 ## Construct the Gibbs sampling object g = mcmc_sampler_sbm.mcmc_sbm(A,m=25) ## Initialise the clusters using k-means if not coclust: g.init_cluster(z=KMeans(n_clusters=5).fit(g.X['s'][:,:5]).labels_ if g.directed else KMeans(n_clusters=5).fit(g.X[:,:5]).labels_) else: g.init_cocluster(zs=KMeans(n_clusters=5).fit(g.X['s'][:,:5]).labels_,zr=KMeans(n_clusters=5).fit(g.X['r'][:,:5]).labels_) ## Average within-cluster variance v = {} v['s'] = np.zeros(g.m) v['r'] = np.zeros(g.m) for key in ['s','r']: for k in range(g.K[key] if g.coclust else g.K): v[key] += np.var(g.X[key][(g.z[key] if g.coclust else g.z) == k],axis=0) / (g.K[key] if g.coclust else g.K) ## Initialise d g.init_dim(d=g.K[key] if g.coclust else g.K,delta=0.1,d_constrained=False) ## Initialise the parameters of the Gaussian distribution g.prior_gauss_left_directed(mean0s=np.zeros(g.m),mean0r=np.zeros(g.m),Delta0s=np.diag(v['s']),Delta0r=np.diag(v['r'])) g.prior_gauss_right_directed(sigma0s=np.var(g.X['s'],axis=0),sigma0r=np.var(g.X['r'],axis=0)) ## Initialise the second level clustering if second_order_clustering: if coclust: g.init_group_variance_coclust(vs=range(g.K['s']),vr=range(g.K['r'])) else: g.init_group_variance_clust(v=range(g.K),beta=1.0) ## MCMC sampler d = [] if g.coclust: K = {} Ko = {} if second_order_clustering: H = {} Ho = {} for key in ['s','r']: K[key] = [] Ko[key] = [] if 
second_order_clustering: H[key] = [] Ho[key] = [] else: K = [] Ko = [] if second_order_clustering: H = [] Ho = [] ## Posterior similarity matrix if g.coclust: psm = {} for key in ['s','r']: psm[key] = np.zeros((n,n)) else: psm = np.zeros((n,n)) ## Sampler for s in range(nburn+nsamp): ## Print status of MCMC if s < nburn: sys.stdout.write("\r+++ Burnin +++ %d / %d " % (s+1,nburn)) sys.stdout.flush() elif s == nburn: sys.stdout.write("\n") elif s < nburn + nsamp - 1: sys.stdout.write("\r+++ Sweeps +++ %d / %d " % (s+1-nburn,nsamp)) sys.stdout.flush() else: sys.stdout.write("\r+++ Sweeps +++ %d / %d\n " % (s+1-nburn,nsamp)) ## Choice of the move if second_order_clustering: ## - with second order clustering move = np.random.choice(['gibbs_comm','split_merge','change_dim','prop_empty','gibbs_comm_so','split_merge_so','prop_empty_so']) else: ## - without second order clustering move = np.random.choice(['gibbs_comm','split_merge','change_dim','prop_empty']) if move == 'gibbs_comm': g.gibbs_communities() elif move == 'split_merge': g.split_merge() elif move == 'change_dim': g.dimension_change(prop_step=3) elif move == 'gibbs_comm_so': g.gibbs_second_order() elif move == 'split_merge_so': g.split_merge_second_order() elif move == 'prop_empty_so': g.propose_empty_second_order() else: g.propose_empty() ## Update the parameters if s >= nburn: d += [g.d] if g.coclust: for key in ['s','r']: K[key] += [g.K[key]] Ko[key] += [g.Ko[key]] if second_order_clustering: H[key] += [g.H[key]] Ho[key] += [np.sum(g.vk[key] > 0)] psm[key] += np.equal.outer(g.z[key],g.z[key]) else: K += [g.K] Ko += [g.Ko] if second_order_clustering: H += [g.H] Ho += [np.sum(g.vk > 0)] psm += np.equal.outer(g.z,g.z) ## Convert to arrays d = np.array(d) if coclust: for key in ['s','r']: K[key] = np.array(K[key]) Ko[key] = np.array(Ko[key]) if second_order_clustering: H[key] = np.array(H[key]) Ho[key] = np.array(Ho[key]) else: K = np.array(K) Ko = np.array(Ko) if second_order_clustering: H = np.array(H) Ho = 
np.array(Ho) ## Save files if dest_folder == '': np.savetxt('d.txt',d,fmt='%d') if coclust: for key in ['s','r']: np.savetxt('K_'+key+'.txt',K[key],fmt='%d') np.savetxt('Ko_'+key+'.txt',Ko[key],fmt='%d') if second_order_clustering: np.savetxt('H_'+key+'.txt',H[key],fmt='%d') np.savetxt('Ho_'+key+'.txt',Ho[key],fmt='%d') np.savetxt('psm_'+key+'.txt',psm[key]/float(np.max(psm[key])),fmt='%f') else: np.savetxt('K.txt',K,fmt='%d') np.savetxt('Ko.txt',Ko,fmt='%d') if second_order_clustering: np.savetxt('H.txt',H,fmt='%d') np.savetxt('Ho.txt',Ho,fmt='%d') np.savetxt('psm.txt',psm/float(np.max(psm)),fmt='%f') else: np.savetxt(dest_folder+'/d.txt',d,fmt='%d') if coclust: for key in ['s','r']: np.savetxt(dest_folder+'/K_'+key+'.txt',K[key],fmt='%d') np.savetxt(dest_folder+'/Ko_'+key+'.txt',Ko[key],fmt='%d') if second_order_clustering: np.savetxt(dest_folder+'/H_'+key+'.txt',H[key],fmt='%d') np.savetxt(dest_folder+'/Ho_'+key+'.txt',Ho[key],fmt='%d') np.savetxt(dest_folder+'/psm_'+key+'.txt',psm[key]/float(np.max(psm[key])),fmt='%f') else: np.savetxt(dest_folder+'/d.txt',d,fmt='%d') np.savetxt(dest_folder+'/K.txt',K,fmt='%d') np.savetxt(dest_folder+'/Ko.txt',Ko,fmt='%d') if second_order_clustering: np.savetxt(dest_folder+'/H.txt',H,fmt='%d') np.savetxt(dest_folder+'/Ho.txt',Ho,fmt='%d') np.savetxt(dest_folder+'/psm.txt',psm/float(np.max(psm)),fmt='%f') ##### Plots ##### ## Scree plot U,S,V = np.linalg.svd(A) plt.figure() plt.plot(np.arange(len(S))+1,S,c='black') plt.plot(np.arange(len(S))+1,S,'.',markersize=.3,c='black') plt.plot(mode(d)[0][0]+1,S[mode(d)[0][0]],"o",c='red') if dest_folder == '': if not tex_figures: plt.savefig('scree_plot.pdf') else: tikz_save('scree_plot.tex') else: if not tex_figures: plt.savefig(dest_folder+'/scree_plot.pdf') else: tikz_save(dest_folder+'/scree_plot.tex') ## Posterior barplot (unrestricted) if coclust: for key in ['s','r']: plt.figure() fig, ax = plt.subplots() 
ax.bar(np.array(Counter(Ko[key]).keys())-.35,Counter(Ko[key]).values(),width=0.35,color='black',align='edge',alpha=.8,label='$K_\\varnothing$') if second_order_clustering: ax.bar(np.array(Counter(Ho[key]).keys()),Counter(Ho[key]).values(),width=0.35,color='gray',align='edge',alpha=.8,label='$H_\\varnothing$') leg = ax.legend() ax.axvline(x=mode(d)[0][0],linestyle='--',c='red') if dest_folder == '': if not tex_figures: plt.savefig('posterior_barplot_unrestricted_'+key+'.pdf') else: tikz_save('posterior_barplot_unrestricted_'+key+'.tex') else: if not tex_figures: plt.savefig(dest_folder+'/posterior_barplot_unrestricted_'+key+'.pdf') else: tikz_save(dest_folder+'/posterior_barplot_unrestricted_'+key+'.tex') else: plt.figure() fig, ax = plt.subplots() ax.bar(np.array(Counter(Ko).keys())-.35,Counter(Ko).values(),width=0.35,color='black',align='edge',alpha=.8,label='$K_\\varnothing$') if second_order_clustering: ax.bar(np.array(Counter(Ho).keys()),Counter(Ho).values(),width=0.35,color='gray',align='edge',alpha=.8,label='$H_\\varnothing$') leg = ax.legend() ax.axvline(x=mode(d)[0][0],linestyle='--',c='red') if dest_folder == '': if not tex_figures: plt.savefig('posterior_barplot_unrestricted.pdf') else: tikz_save('posterior_barplot_unrestricted.tex') else: if not tex_figures: plt.savefig(dest_folder+'/posterior_barplot_unrestricted.pdf') else: tikz_save(dest_folder+'/posterior_barplot_unrestricted.tex') ## Posterior barplot (restricted) if coclust: for key in ['s','r']: plt.figure() fig, ax = plt.subplots() ax.bar(np.array(Counter((Ko[key])[Ko[key] >= d]).keys())-.35,Counter((Ko[key])[Ko[key] >= d]).values(),width=0.35, color='black',align='edge',alpha=.8,label='$K_\\varnothing$') if second_order_clustering: ax.bar(np.array(Counter((Ho[key])[Ko[key] >= d]).keys()),Counter((Ho[key])[Ko[key] >= d]).values(),width=0.35, color='gray',align='edge',alpha=.8,label='$H_\\varnothing$') leg = ax.legend() ax.axvline(x=mode(d)[0][0],linestyle='--',c='red') if dest_folder == '': if 
not tex_figures: plt.savefig('posterior_barplot_restricted_'+key+'.pdf') else: tikz_save('posterior_barplot_restricted_'+key+'.tex') else: if not tex_figures: plt.savefig(dest_folder+'/posterior_barplot_restricted_'+key+'.pdf') else: tikz_save(dest_folder+'/posterior_barplot_restricted_'+key+'.tex') else: plt.figure() fig, ax = plt.subplots() ax.bar(np.array(Counter(Ko[Ko >= d]).keys())-.35,Counter(Ko[Ko >= d]).values(),width=0.35,color='black',align='edge',alpha=.8,label='$K_\\varnothing$') if second_order_clustering: ax.bar(np.array(Counter(Ho[Ko >= d]).keys()),Counter(Ho[Ko >= d]).values(),width=0.35,color='gray',align='edge',alpha=.8,label='$H_\\varnothing$') leg = ax.legend() ax.axvline(x=mode(d)[0][0],linestyle='--',c='red') if dest_folder == '': if not tex_figures: plt.savefig('posterior_barplot_restricted.pdf') else: tikz_save('posterior_barplot_restricted.tex') else: if not tex_figures: plt.savefig(dest_folder+'/posterior_barplot_restricted.pdf') else: tikz_save(dest_folder+'/posterior_barplot_restricted.tex') ## MAP for clustering if coclust: cc_pear = {} cc_map = {} for key in ['s','r']: cc_pear[key] = estimate_clustering(psm[key]) cc_map[key] = estimate_clustering(psm[key],k=mode(Ko[key])[0][0]) if dest_folder == '':
np.savetxt('pear_clusters_'+key+'.txt',cc_pear[key],fmt='%d')
numpy.savetxt
import os import numpy as np import h5py as h5 import glob import shutil from .data_reader import DataReader_pred from .predict_fn import pred_fn import pkg_resources model_dir = pkg_resources.resource_filename('phasenet', os.path.join('model', '190703-214543')) script_path = os.path.dirname(os.path.realpath(__file__)) def format_data_hdf5(data, root_PN_inputs='.', filename='data.h5'): """Format data for PhasetNet (hdf5). Save the data array in an hdf5 file such that PhaseNet can process it. Parameters ------------- data: (n_stations, 3, n_samples) nd.array Numpy array with the continuous 3-component seismic data on which we want to pick the P- and S-wave arrivals. root_PN_inputs: string, default to '.' Path to the root folder where formatted data will be stored. filename: string, default to 'data.h5' Name of the file listing the filenames of all 3-component time series to process. """ import h5py as h5 with h5.File(os.path.join(root_PN_inputs, filename), 'w') as f: f.create_group('data') for i in range(data.shape[0]): # place the component axis at the end three_comp_data = np.swapaxes(data[i, ...], 0, 1) f['data'].create_dataset(f'sample{i}', data=three_comp_data) def format_data_ram(data): """Format data for PhasetNet. Build the data dictionary for PhaseNet. Parameters ------------- data: (n_stations, 3, n_samples) nd.array Numpy array with the continuous 3-component seismic data on which we want to pick the P- and S-wave arrivals. """ data_pn = {} for i in range(data.shape[0]): data_pn[f'sample{i}'] = np.swapaxes(data[i, ...], 0, 1) return data_pn def run_pred(input_length, model_path=model_dir, data=None, data_path='./dataset/waveform_pred/', log_dir='./dataset/log/', data_file='./dataset/data.h5', format='hdf5', amplitude=False, batch_size=1, threshold_P=0.6, threshold_S=0.6, **kwargs): """Run PhaseNet and fetch its raw output: the P and S probabilities. Results are stored at the user-defined location `output_filename`. 
Extra kwargs are passed to `phasenet.predict_fn.pred_fn`. Parameters ------------ input_length: int Duration, in samples, of the 3-component seismograms. model_path: string, default to '/home/ebeauce/PhaseNet/model/190703-214543' Path to the trained model. It is of course necessary to change the default value to the adequate value on your machine (e.g. where you downloaded PhaseNet). data_path: string, default to './dataset/waveform_pred/' Path to the folder with the 3-component seismograms in npz files. log_dir: string, default to './dataset/log/' data_list: string, default to './dataset/data_list.csv' output_filename: string, default to './prediction.npy' Name of the file with PhaseNet's outputs. batch_size: int, default to 1 Number of 3-component seismograms processed by PhaseNet at once. This should to take into account the machine's RAM. threshold_P: float, default to 0.6 P-wave identification threshold. When PhaseNet's raw output (proba) exceeds `threshold_P`, a detection is triggered. threshold_S: float, default to 0.6 S-wave identification threshold. When PhaseNet's raw output (proba) exceeds `threshold_S`, a detection is triggered. """ if format == 'hdf5': data_reader = DataReader_pred( format='hdf5', data_list='', # not used with hdf5 format hdf5_file=data_file, hdf5_group='data', amplitude=amplitude) elif format == 'ram': data_reader = DataReader_pred( format='ram', data=data, amplitude=amplitude) PhaseNet_proba, PhaseNet_picks = pred_fn( data_reader, model_dir=model_path, log_dir=log_dir, batch_size=batch_size, input_length=input_length, min_p_prob=threshold_P, min_s_prob=threshold_S, **kwargs) if format == 'hdf5': # PhaseNet does not take care of closing the hdf5 file data_reader.h5.close() return PhaseNet_proba, PhaseNet_picks def automatic_picking(data, station_names, PN_base=None, PN_dataset_name=None, format='ram', mini_batch_size=126, threshold_P=0.6, threshold_S=0.6, **kwargs): """Wrapper function to call PhaseNet from a python script. 
Extra kwargs are passed to `phasenet.predict_fn.pred_fn`. Parameters ----------- data: (n_events, n_stations, 3, n_samples) nd.array Numpy array with the continuous 3-component seismograms of `n_events` earthquakes recorded at a network of `n_stations` stations. station_names: list or array of strings Name of the `n_stations` stations of the array, in the same order as given in `data`. PN_base: string, default to None Path to the root folder where PhaseNet formatted data will be stored. Required if `format='ram'`. PN_dataset_name: string, default to None Name of the folder, inside `PN_base`, where the formatted data of a given experiment will be stored. Required if `format='ram'`. mini_batch_size: int, default to 126 Number of 3-component seismograms processed by PhaseNet at once. This should to take into account the machine's RAM. threshold_P: float, default to 0.6 P-wave identification threshold. When PhaseNet's raw output (proba) exceeds `threshold_P`, a detection is triggered. threshold_S: float, default to 0.6 S-wave identification threshold. When PhaseNet's raw output (proba) exceeds `threshold_S`, a detection is triggered. Returns --------- PhaseNet_probas: (n_events, n_stations, n_samples, 2) numpy.narray, float Probabilities of P- and S-wave arrival on the continuous time axis. PhaseNet_probas[..., 0] is the P-wave probability. PhaseNet_probas[..., 1] is the S-wave probability. PhaseNet_picks: dictionary Dictionary with four fields: 'P_proba', 'P_picks', 'S_proba', 'S_picks'. Each of these fields contains another dictionary with one entry per station. Finally, the content of each PhaseNet_picks[field][station] is an (n_events, numpy.ndarrays) array of arrays with all picks and associated probabilities for each event. 
""" if format == 'hdf5': if not os.path.isdir(PN_base): print(f'Creating the formatted data root folder at {PN_base}') os.mkdir(PN_base) # clean up input/output directories if necessary root_PN_inputs = os.path.join(PN_base, PN_dataset_name) if not os.path.isdir(root_PN_inputs): print(f'Creating the experiment root folder at {root_PN_inputs}') os.mkdir(root_PN_inputs) else: PN_base = '' root_PN_inputs = '' # assume the data were provided in the shape # (n_events x n_stations x 3-comp x time_duration) n_events = data.shape[0] n_stations = data.shape[1] input_length = data.shape[3] # for efficiency, we merge the event and the station axes batch_size = n_events*n_stations print('n events: {:d}, n stations: {:d}, batch size (n events x n stations): {:d}'. format(n_events, n_stations, batch_size)) data = data.reshape(batch_size, 3, input_length) # make sure the minibatch size is not larger than the # total number of traces minibatch_size = min(mini_batch_size, batch_size) # generate the input files necessary for PhaseNet if format == 'hdf5': format_data_hdf5(data, root_PN_inputs=root_PN_inputs) data_pn = None elif format == 'ram': data_pn = format_data_ram(data) # call PhaseNet PhaseNet_proba, PhaseNet_picks = run_pred( input_length, data_file=os.path.join(root_PN_inputs, 'data.h5'), log_dir=os.path.join(root_PN_inputs, 'log'), batch_size=mini_batch_size, threshold_P=threshold_P, threshold_S=threshold_S, format=format, data=data_pn, **kwargs) # the new PhaseNet_proba is an array of time series with [..., 0] = proba of P arrival # and [..., 1] = proba of S arrival (the original [..., 0] was simply 1 - Pp - Ps) PhaseNet_proba = PhaseNet_proba.reshape((n_events, n_stations, input_length, 3))[..., 1:] PhaseNet_picks = PhaseNet_picks.reshape((n_events, n_stations, 2, 2)) # return picks in a comprehensive python dictionary picks = {} picks['P_picks'] = {} picks['P_proba'] = {} picks['S_picks'] = {} picks['S_proba'] = {} for s in range(n_stations): # (n_events, arrays): array 
of arrays with all detected P-arrival picks picks['P_picks'][station_names[s]] = PhaseNet_picks[:, s, 0, 0] # (n_events, arrays): array of arrays with probabilities of all detected P-arrival picks picks['P_proba'][station_names[s]] = PhaseNet_picks[:, s, 0, 1] # (n_events, arrays): array of arrays with all detected S-arrival picks picks['S_picks'][station_names[s]] = PhaseNet_picks[:, s, 1, 0] # (n_events, arrays): array of arrays with probabilities of all detected S-arrival picks picks['S_proba'][station_names[s]] = PhaseNet_picks[:, s, 1, 1] if format == 'hdf5': # clean up when done shutil.rmtree(root_PN_inputs) return PhaseNet_proba, picks # -------------------------------------------------------------------------------- # The following functions were tailored for template matching applications # -------------------------------------------------------------------------------- def get_best_picks(picks, buffer_length=50): """Filter picks to keep the best one on each 3-comp seismogram. """ for st in picks['P_picks'].keys(): for n in range(len(picks['P_picks'][st])): pp = picks['P_picks'][st][n] ps = picks['S_picks'][st][n] # ---------------- # remove picks form the buffer length valid_P_picks = picks['P_picks'][st][n] > int(buffer_length) valid_S_picks = picks['S_picks'][st][n] > int(buffer_length) picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks] picks['S_picks'][st][n] = picks['S_picks'][st][n][valid_S_picks] picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks] picks['S_proba'][st][n] = picks['S_proba'][st][n][valid_S_picks] # take only the highest probability trigger if len(picks['S_picks'][st][n]) > 0: best_S_trigger = picks['S_proba'][st][n].argmax() picks['S_picks'][st][n] = picks['S_picks'][st][n][best_S_trigger] picks['S_proba'][st][n] = picks['S_proba'][st][n][best_S_trigger] # update P picks: keep only those that are before the best S pick valid_P_picks = picks['P_picks'][st][n] < picks['S_picks'][st][n] picks['P_picks'][st][n] 
= picks['P_picks'][st][n][valid_P_picks] picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks] else: # if no valid S pick: fill in with nan picks['S_picks'][st][n] = np.nan picks['S_proba'][st][n] = np.nan if len(picks['P_picks'][st][n]) > 0: best_P_trigger = picks['P_proba'][st][n].argmax() picks['P_picks'][st][n] = picks['P_picks'][st][n][best_P_trigger] picks['P_proba'][st][n] = picks['P_proba'][st][n][best_P_trigger] else: # if no valid P pick: fill in with nan picks['P_picks'][st][n] = np.nan picks['P_proba'][st][n] = np.nan # convert picks to float to allow NaNs picks['P_picks'][st] = np.float32(picks['P_picks'][st]) picks['S_picks'][st] = np.float32(picks['S_picks'][st]) picks['P_proba'][st] = np.float32(picks['P_proba'][st]) picks['S_proba'][st] = np.float32(picks['S_proba'][st]) return picks def get_all_picks(picks, buffer_length=50): """Combine all picks from multiple events (1 station) in one array. This function makes sense when the (n_events, n_stations, n_components, n_samples) `data` array given to `automatic_picking` is an array of `n_events` similar earthquakes (i.e. similar locations, and therefore similar expected picks). Then, each station has potentially many P-wave and S-wave picks with which we can define a mean value and an error (see `fit_probability_density`). Parameters --------------- picks: dictionary Picks returned by `automatic_picking`. buffer_length: int, default to 50 Due to some edge effects, PhaseNet tends to trigger false detections at the beginning of a 3-comp seismogram. `buffer_length` is the time, in samples, to ignore at the beginning. Returns ----------- picks: dictionary A dictionary with 4 fields: `P_picks`, 'S_picks', 'P_proba', 'S_proba', and each of these fields is itself a dictionary for one entry for each station. Example: picks['P_picks']['station1'] = [124, 123, 126, 250] means that 4 P-wave picks were identified on station1, with possibly one outlier at sample 250. 
""" for st in picks['P_picks'].keys(): P_picks = [] P_proba = [] S_picks = [] S_proba = [] for n in range(len(picks['P_picks'][st])): pp = picks['P_picks'][st][n] ps = picks['S_picks'][st][n] # ---------------- # remove picks from the buffer length valid_P_picks = picks['P_picks'][st][n] > int(buffer_length) valid_S_picks = picks['S_picks'][st][n] > int(buffer_length) picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks] picks['S_picks'][st][n] = picks['S_picks'][st][n][valid_S_picks] picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks] picks['S_proba'][st][n] = picks['S_proba'][st][n][valid_S_picks] # take all picks P_picks.extend(picks['P_picks'][st][n].tolist()) P_proba.extend(picks['P_proba'][st][n].tolist()) S_picks.extend(picks['S_picks'][st][n].tolist()) S_proba.extend(picks['S_proba'][st][n].tolist()) picks['P_picks'][st] = np.int32(P_picks) picks['S_picks'][st] = np.int32(S_picks) picks['P_proba'][st] =
np.float32(P_proba)
numpy.float32
"""Auditory Filterbanks and scales for Speech and Audio Analysis. The Gammatone filterbank is a direct translation of <NAME>' Gammatone-like spectrograms package [1], which is partly and a direct translation of Malcolm Slaney's Auditory toolbox [2]. References: [1]: https://labrosa.ee.columbia.edu/matlab/gammatonegram/ [2]: https://engineering.purdue.edu/~malcolm/interval/1998-010/ """ import numpy as np from scipy import signal from .util import fftfreqz, freqz def dft2mel(nfft, sr=8000., nfilts=0, width=1., minfrq=0., maxfrq=4000., sphinx=False, constamp=True): """Map linear discrete frequencies to Mel scale.""" if nfilts == 0: nfilts = np.int(np.ceil(hz2mel(np.array([maxfrq]), sphinx)[0]/2)) weights = np.zeros((nfilts, nfft)) # dft index -> linear frequency in hz dftfrqs = np.arange(nfft/2+1, dtype=np.float)/nfft * sr maxmel, minmel = hz2mel(np.array([maxfrq, minfrq]), sphinx) binfrqs = mel2hz(minmel+np.linspace(0., 1., nfilts+2) * (maxmel-minmel), sphinx) for i in range(nfilts): fs = binfrqs[i:i+3].copy() fs = fs[1] + width*(fs-fs[1]) # adjust bandwidth if needed loslope = (dftfrqs - fs[0])/(fs[1] - fs[0]) hislope = (fs[2] - dftfrqs)/(fs[2] - fs[1]) weights[i, 0:nfft/2+1] = np.maximum(0, np.minimum(loslope, hislope)) if constamp: # Slaney-style mel is scaled to be approx constant E per channel weights = np.diag( 2/(binfrqs[2:nfilts+2]-binfrqs[:nfilts])).dot(weights) weights[:, nfft/2+1:] = 0 # avoid aliasing return weights, binfrqs[1:] def hz2dft(freq, sr, nfft): """Map frequency in Hz to discrete Fourier transform bins. Parameters ---------- freq: array_like Frequency in hz sr: int Sampling rate in hz nfft: int Number of DFT bins in range [0, 2*pi) Returns ------- bins: array_like Frequency bin numbers """ return (freq/sr * nfft).astype('int') def hz2mel(f, sphinx=True): """Convert linear frequency to mel frequency scale.""" if sphinx: return 2595. * np.log10(1+f/700.) # match Slaney's toolbox f0, f_sp, brkfrq = 0., 200./3, 1000. 
brkpt = (brkfrq - f0) / f_sp logstep = np.exp(np.log(6.4)/27.) z = np.empty_like(f) lower = f < brkfrq # np.less(f,brkfrq) higher = np.logical_not(lower) z[lower] = (f[lower] - f0) / f_sp z[higher] = brkpt +
np.log(f[higher]/brkfrq)
numpy.log
import argparse import sys import optax import torch import numpy as np import time import jax import jax.numpy as jnp import matplotlib as mp import haiku as hk import dill as pickle try: mp.use("Qt5Agg") mp.rc('text', usetex=True) mp.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"] import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.cm as cm except ImportError: pass import deep_lagrangian_networks.jax_HNN_model as hnn import deep_lagrangian_networks.jax_DeLaN_model as delan import deep_lagrangian_networks.jax_Black_Box_model as black_box from deep_lagrangian_networks.utils import load_dataset, init_env, activations from deep_lagrangian_networks.jax_integrator import symplectic_euler, explicit_euler, runge_kutta_4 def running_mean(x, n): cumsum = np.cumsum(np.concatenate([x[0] * np.ones((n,)), x])) return (cumsum[n:] - cumsum[:-n]) / n if __name__ == "__main__": n_plot = 5 dataset = "uniform" model_id = ["structured", "black_box", "structured", "black_box", "black_box"] module_key = ["DeLaN", "DeLaN", "HNN", "HNN", "Network"] colors = { "DeLaN structured": cm.get_cmap(cm.Set1)(0), "DeLaN black_box": cm.get_cmap(cm.Set1)(1), "HNN structured": cm.get_cmap(cm.Set1)(2), "HNN black_box": cm.get_cmap(cm.Set1)(3), "Network black_box": cm.get_cmap(cm.Set1)(4), } results = {} for i in range(n_plot): with open(f"data/results/{module_key[i]}_{model_id[i]}_{dataset}.pickle", "rb") as file: results[module_key[i] + " " + model_id[i]] = pickle.load(file) if dataset == "char": train_data, test_data, divider, dt = load_dataset( filename="data/character_data.pickle", test_label=["e", "q", "v"]) elif dataset == "uniform": train_data, test_data, divider, dt = load_dataset( filename="data/uniform_data.pickle", test_label=["Test 0", "Test 1", "Test 2"]) else: raise ValueError vpt_th = 1.e-2 for i in range(n_plot): key = f"{module_key[i]} {model_id[i]}" n_seeds = results[key]['forward_model']['q_error'].shape[0] xd_error = 
np.mean(results[key]['forward_model']['xd_error']), 2. * np.std(results[key]['forward_model']['xd_error']) n_test = 2 vpt = np.zeros((0, n_test)) for i in range(n_seeds): vpt_i = [] for j in range(n_test): traj = np.concatenate([ results[key]['forward_model']['q_error'][i, divider[j]:divider[j+1]], results[key]['forward_model']['q_error'][i, -1:] * 0.0 + 1.]) vpt_i = vpt_i + [np.argwhere(traj >= vpt_th)[0, 0]] vpt = np.concatenate([vpt, np.array([vpt_i])]) vpt = np.mean(vpt), np.std(vpt) unit = r"\text{s}" string = f"${xd_error[0]:.1e}{'}'} \pm {xd_error[1]:.1e}{'}'}$ & ${vpt[0]*dt:.2f}{unit} \pm {vpt[1]*dt:.2f}{unit}$ \\\\".replace("e-", r"\mathrm{e}{-").replace("e+", r"\mathrm{e}{+") print(f"{key:20} - " + string) test_labels, test_qp, test_qv, test_qa, test_p, test_pd, test_tau, test_m, test_c, test_g = test_data tau_g, tau_c, tau_m, tau = jnp.array(test_g), jnp.array(test_c), jnp.array(test_m), jnp.array(test_tau) q, qd, qdd = jnp.array(test_qp), jnp.array(test_qv), jnp.array(test_qa) p, pd = jnp.array(test_p), jnp.array(test_pd) dHdt = jax.vmap(jnp.dot, [0, 0])(qd, tau) H = jnp.concatenate([dt * jnp.cumsum(dHdt[divider[i]: divider[i+1]]) for i in range(3)]) def smoothing(x): return np.concatenate([running_mean(x[divider[i]:divider[i + 1]], 10) for i in range(3)]) print("\n################################################") print("Plotting Performance:") # Alpha of the graphs: plot_alpha = 0.8 y_offset = -0.15 n_test = 2 # Plot the performance: q_low = np.clip(1.5 * np.min(np.array(q), axis=0), -np.inf, -0.01) q_max = np.clip(1.5 * np.max(np.array(q), axis=0), 0.01, np.inf) if dataset == "char": q_max = np.array([0.25, 3.]) q_low = np.array([-1.25, 1.]) qd_low = np.clip(1.5 * np.min(qd, axis=0), -np.inf, -0.01) qd_max = np.clip(1.5 * np.max(qd, axis=0), 0.01, np.inf) p_low = np.clip(1.2 * np.min(p, axis=0), -np.inf, -0.01) p_max = np.clip(1.2 * np.max(p, axis=0), 0.01, np.inf) H_lim = [-0.01, +0.01] if dataset == "uniform" else [-2.75, +2.75] err_min, err_max = 
1.e-5, 1.e3 plt.rc('text', usetex=True) color_i = ["r", "b", "g", "k"] ticks = np.array(divider) ticks = (ticks[:-1] + ticks[1:]) / 2 fig = plt.figure(figsize=(24.0 / 1.54, 8.0 / 1.54), dpi=100) fig.subplots_adjust(left=0.06, bottom=0.12, right=0.98, top=0.95, wspace=0.24, hspace=0.2) fig.canvas.set_window_title('') legend = [ mp.patches.Patch(color=colors["DeLaN structured"], label="DeLaN - Structured Lagrangian"), mp.patches.Patch(color=colors["DeLaN black_box"], label="DeLaN - Black-Box Lagrangian"), mp.patches.Patch(color=colors["HNN structured"], label="HNN - Structured Hamiltonian"), mp.patches.Patch(color=colors["HNN black_box"], label="HNN - Black-Box Hamiltonian"), mp.patches.Patch(color=colors["Network black_box"], label="Feed-Forward Network"), mp.patches.Patch(color="k", label="Ground Truth")] ax0 = fig.add_subplot(3, 4, 1) ax0.set_title(r"Generalized Position $\mathbf{q}$") ax0.text(s=r"\textbf{Joint 0}", x=-0.25, y=.5, fontsize=12, fontweight="bold", rotation=90, horizontalalignment="center", verticalalignment="center", transform=ax0.transAxes) ax0.set_ylabel(r"$\mathbf{q}_0$ [Rad]") ax0.get_yaxis().set_label_coords(-0.2, 0.5) ax0.set_ylim(q_low[0], q_max[0]) ax0.set_xticks(ticks) ax0.set_xticklabels(test_labels) [ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))] ax0.set_xlim(divider[0], divider[n_test]) ax0.yaxis.set_label_coords(y_offset, 0.5) ax1 = fig.add_subplot(3, 4, 5) ax1.text(s=r"\textbf{Joint 1}", x=-.25, y=0.5, fontsize=12, fontweight="bold", rotation=90, horizontalalignment="center", verticalalignment="center", transform=ax1.transAxes) ax1.set_ylabel(r"$\mathbf{q}_1$ [Rad]") ax1.get_yaxis().set_label_coords(-0.2, 0.5) ax1.set_ylim(q_low[1], q_max[1]) ax1.set_xticks(ticks) ax1.set_xticklabels(test_labels) [ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))] ax1.set_xlim(divider[0], divider[n_test]) 
ax1.yaxis.set_label_coords(y_offset, 0.5) ax2 = fig.add_subplot(3, 4, 9) ax2.text(s=r"\textbf{Error}", x=-.25, y=0.5, fontsize=12, fontweight="bold", rotation=90, horizontalalignment="center", verticalalignment="center", transform=ax2.transAxes) ax2.text(s=r"\textbf{(a)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center", verticalalignment="center", transform=ax2.transAxes) ax2.get_yaxis().set_label_coords(-0.2, 0.5) ax2.set_xticks(ticks) ax2.set_xticklabels(test_labels) [ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))] ax2.set_xlim(divider[0], divider[n_test]) ax2.set_ylim(err_min, err_max) ax2.set_yscale('log') ax2.set_ylabel(r"Position Error") ax2.yaxis.set_label_coords(y_offset, 0.5) ax2.axhline(vpt_th, color="k", linestyle="--") # Plot Ground Truth Torque: ax0.plot(q[:, 0], color="k") ax1.plot(q[:, 1], color="k") # Plot DeLaN Torque: for key in results.keys(): color = colors[key] q_pred = results[key]["forward_model"]["q_pred"] q_error = results[key]["forward_model"]["q_error"] q_pred_min, q_pred_mean, q_pred_max = np.min(q_pred, axis=0), np.median(q_pred, axis=0),
np.max(q_pred, axis=0)
numpy.max
import os import os.path as osp import tempfile import numpy as np import pytest import torch from mmhuman3d.models import HybrIK_trainer, HybrIKHead from mmhuman3d.models.builder import build_body_model from mmhuman3d.models.utils.inverse_kinematics import ( batch_get_3children_orient_svd, batch_get_pelvis_orient, batch_get_pelvis_orient_svd, batch_inverse_kinematics_transform, ) def generate_weights(output_dir): """Generate a SMPL model weight file to initialize SMPL model, and generate a 3D joints regressor file.""" if not os.path.exists(output_dir): os.makedirs(output_dir) joint_regressor_file = os.path.join(output_dir, 'J_regressor_h36m.npy') np.save(joint_regressor_file, np.zeros([17, 6890])) smpl_mean_file = os.path.join(output_dir, 'h36m_mean_beta.npy') np.save(smpl_mean_file, np.zeros([ 10, ])) return def test_HybrIK_head(): tmpdir = tempfile.TemporaryDirectory() # generate weight file for SMPL model. generate_weights(tmpdir.name) # initialize models head = HybrIKHead( smpl_mean_params=osp.join(tmpdir.name, 'h36m_mean_beta.npy')) smpl = build_body_model( dict( type='HybrIKSMPL', model_path='data/body_models/smpl', extra_joints_regressor=osp.join(tmpdir.name, 'J_regressor_h36m.npy'))) if torch.cuda.is_available(): head = head.cuda() smpl = smpl.cuda() with pytest.raises(TypeError): _ = HybrIKHead() with pytest.raises(TypeError): _ = HybrIKHead( feature_channel=[512, 8], smpl_mean_params='data/body_models/h36m_mean_beta.npy') # mock inputs batch_size = 4 input_shape = (batch_size, 512, 8, 8) mm_inputs = _demo_head_inputs(input_shape) features = mm_inputs.pop('features') trans_inv = mm_inputs.pop('trans_inv') joint_root = mm_inputs.pop('joint_root') depth_factor = mm_inputs.pop('depth_factor') intrinsic_param = mm_inputs.pop('intrinsic_param') if torch.cuda.is_available(): predictions = head(features, trans_inv, intrinsic_param, joint_root, depth_factor, smpl) pred_keys = [ 'pred_phi', 'pred_delta_shape', 'pred_shape', 'pred_pose', 'pred_uvd_jts', 
'pred_xyz_jts_24', 'pred_xyz_jts_24_struct', 'pred_xyz_jts_17', 'pred_vertices', 'maxvals' ] for k in pred_keys: assert k in predictions assert predictions[k].shape[0] == batch_size with pytest.raises(RuntimeError): joint_root = torch.zeros((6, 3)).cuda() _ = head(features, trans_inv, intrinsic_param, joint_root, depth_factor, smpl) with pytest.raises(RuntimeError): joint_root = torch.zeros((batch_size, 3)) _ = head(features, trans_inv, intrinsic_param, joint_root, depth_factor, smpl) tmpdir.cleanup() def test_HybrIK_trainer(): tmpdir = tempfile.TemporaryDirectory() # generate weight file for SMPL model. generate_weights(tmpdir.name) model_cfg = dict( backbone=dict( type='ResNet', depth=34, out_indices=[3], norm_eval=False, init_cfg=dict( type='Pretrained', checkpoint='torchvision://resnet34')), head=dict( type='HybrIKHead', smpl_mean_params=osp.join(tmpdir.name, 'h36m_mean_beta.npy')), body_model=dict( type='HybrIKSMPL', model_path= # noqa: E251 'data/body_models/smpl', extra_joints_regressor=osp.join(tmpdir.name, 'J_regressor_h36m.npy')), loss_beta=dict(type='MSELoss', loss_weight=1), loss_theta=dict(type='MSELoss', loss_weight=0.01), loss_twist=dict(type='MSELoss', loss_weight=0.01), loss_uvd=dict(type='L1Loss', loss_weight=1), ) model = HybrIK_trainer(**model_cfg) if torch.cuda.is_available(): model = model.cuda() input_shape = (4, 3, 256, 256) mm_inputs = _demo_mm_inputs(input_shape) img = mm_inputs.pop('img') img_metas = mm_inputs.pop('img_metas') if torch.cuda.is_available(): output = model.forward_train(img, img_metas, **mm_inputs) assert isinstance(output, dict) assert 'loss_beta' in output assert output['loss_beta'].dtype == torch.float32 with torch.no_grad(): output = model.forward_test(img, img_metas, **mm_inputs) assert isinstance(output, dict) for k in ['vertices', 'xyz_17', 'uvd_jts', 'xyz_24', 'image_path']: assert k in output tmpdir.cleanup() def test_IK_functions(): N = 4 mm_inputs = _demo_IK_inputs(N) pose_skeleton = mm_inputs['pose_skeleton'] 
phis = mm_inputs['phis'] rest_pose = mm_inputs['rest_pose'] children = mm_inputs['children'] parents = mm_inputs['parents'] rel_pose_skeleton = mm_inputs['rel_pose_skeleton'] rel_rest_pose = mm_inputs['rel_rest_pose'] rot_mat_chain_parent = mm_inputs['rot_mat_chain_parent'] global_orient = None dtype = torch.float32 rot_mat, rot_rest_pose = batch_inverse_kinematics_transform( pose_skeleton, global_orient, phis, rest_pose, children, parents, dtype, train=False, leaf_thetas=None) assert rot_mat.shape == (N, 24, 3, 3) assert rot_rest_pose.shape == (N, 29, 3) rot_mat, rot_rest_pose = batch_inverse_kinematics_transform( pose_skeleton, global_orient, phis, rest_pose, children, parents, dtype, train=True, leaf_thetas=None) assert rot_mat.shape == (N, 24, 3, 3) assert rot_rest_pose.shape == (N, 29, 3) global_orient_mat = batch_get_pelvis_orient(rel_pose_skeleton.clone(), rel_rest_pose.clone(), parents, children, dtype) assert global_orient_mat.shape == (N, 3, 3) global_orient_mat = batch_get_pelvis_orient_svd(rel_pose_skeleton.clone(), rel_rest_pose.clone(), parents, children, dtype) assert global_orient_mat.shape == (N, 3, 3) rot_mat = batch_get_3children_orient_svd(rel_pose_skeleton, rel_rest_pose, rot_mat_chain_parent, children, dtype) assert rot_mat.shape == (N, 3, 3) def _demo_mm_inputs(input_shape=(1, 3, 256, 256)): """Create a superset of inputs needed to run test or train batches. 
Args: input_shape (tuple): input batch dimensions """ (N, C, H, W) = input_shape rng = np.random.RandomState(0) imgs = rng.rand(*input_shape) trans_inv = np.zeros([N, 2, 3]) intrinsic_param = np.zeros([N, 3, 3]) joint_root = np.zeros([N, 3]) depth_factor = np.ones([N, 1]) target_uvd_29 = np.zeros([N, 87]) target_xyz_24 = np.zeros([N, 72]) target_weight_24 = np.ones([N, 72]) target_weight_29 = np.ones([N, 87]) target_xyz_17 = np.zeros([N, 51]) target_weight_17 = np.ones([N, 51]) target_theta = np.zeros([N, 96]) target_beta = np.zeros([N, 10]) target_smpl_weight = np.ones([N, 1]) target_theta_weight = np.ones([N, 96]) target_twist = np.zeros([N, 23, 2]) target_twist_weight = np.ones([N, 23, 2]) bbox = np.zeros([N, 4]) img_metas = [{ 'img_shape': (H, W, C), 'center': np.array([W / 2, H / 2]), 'scale': np.array([0.5, 0.5]), 'rotation': 0, 'image_path': '<demo>.png', } for _ in range(N)] mm_inputs = { 'img': torch.FloatTensor(imgs).requires_grad_(True), 'trans_inv': torch.FloatTensor(trans_inv), 'intrinsic_param': torch.FloatTensor(intrinsic_param), 'joint_root': torch.FloatTensor(joint_root), 'depth_factor': torch.FloatTensor(depth_factor), 'target_uvd_29': torch.FloatTensor(target_uvd_29), 'target_xyz_24': torch.FloatTensor(target_xyz_24), 'target_weight_24': torch.FloatTensor(target_weight_24), 'target_weight_29': torch.FloatTensor(target_weight_29), 'target_xyz_17': torch.FloatTensor(target_xyz_17), 'target_weight_17': torch.FloatTensor(target_weight_17), 'target_theta': torch.FloatTensor(target_theta), 'target_beta': torch.FloatTensor(target_beta), 'target_smpl_weight': torch.FloatTensor(target_smpl_weight), 'target_theta_weight': torch.FloatTensor(target_theta_weight), 'target_twist': torch.FloatTensor(target_twist), 'target_twist_weight': torch.FloatTensor(target_twist_weight), 'bbox': torch.FloatTensor(bbox), 'img_metas': img_metas, 'sample_idx':
np.arange(N)
numpy.arange
# ---------------------- # eval script for youcook2 # writen by <NAME> # ---------------------- from __future__ import division import numpy as np import os import json import pickle from tqdm import tqdm from nltk.stem import WordNetLemmatizer from nltk.corpus import wordnet import pdb def parse_gt(root, dataset, gt_file, anno_file, vid_list_file, class_file, img_size, sample_num, thr, phase): """parse the box annotation :param: dataset: dataset Name :param gt_file: file name for store the box gt json file :param anno_file: file name for store the segment time json file :param vid_list_file: video list file name :param class_path: file name for store the class label, stored in ./data/YouCookII/annotations/, each row is the name of a class :param img_size: tuple (h, w) to specify the image size. :param sample_num: sample number [int], 5: sample 5 frames per segment; 0: sample all frames per segment :param thr: iou thresh greater than which the box and gt are matched :output recs: record list of dict, each dict for a box, dict['label'], ['bbox'], ['thr'], ['img_idx'] """ # load bbox ground truth gt_path = os.path.join(root, dataset, 'annotations', gt_file) with open(gt_path) as f: gt = json.load(f) # load anno segment time grond truth anno_path = os.path.join(root, dataset, 'annotations', anno_file) with open(anno_path) as f: anno = json.load(f) # get vid_ids: vid_list_path = os.path.join(root, dataset, 'split', vid_list_file) vid_ids = [] with open(vid_list_path) as f: for line in f: vid_ids.append(line.rstrip('\n').split('/')[-1]) # get class label list class_list_path = os.path.join(root, dataset, 'annotations', class_file) class_list = [] with open(class_list_path) as f: for line in f: class_list.append(line.rstrip('\n')) # Parse the box annotation print('start parsing box annotation...') # iterate over each vid img_count = 0 # recs: store the all dict of labels. 
recs = [] for vid_ind, vid_id in enumerate(tqdm(vid_ids)): h = gt['database'][vid_id]['height'] w = gt['database'][vid_id]['width'] eta_y = img_size[0]/h eta_x = img_size[1]/w # segs: list of tuple [(start, end)] segs = [anno['database'][vid_id]['annotations'][i]['segment'] for i in range(len(anno['database'][vid_id]['annotations']))] # seg_list: labeled seg seg_list = [int(i) for i in gt['database'][vid_id]['annotations'].keys()] seg_list.sort() for seg_ind, seg in enumerate(segs): # if the curretn segment is labeled if seg_ind in seg_list: obj_list = [int(i) for i in gt['database'][vid_id]['annotations'][str(seg_ind)].keys()] obj_list.sort() # frame_length: number of frame in the video segment frame_length = seg[1] - seg[0] # get the sampled frame index (if we are evaluation, we do not need to sample, all frames are needed) # two senarios (now I only have the sampled data, so we need the sampled) # 1. iterate over sampled frames # 2. iterate over all frames frame_list = range(frame_length) if sample_num == 0 else np.round(np.linspace(0, frame_length-1, sample_num)).astype('int') # iterate over frame for frame_ind in frame_list: rec = {} labels = [] bboxes = [] thrs = [] img_ids = [] # iterate over each box, if the frame id is labeled for obj_ind in range(len(gt['database'][vid_id]['annotations'][str(seg_ind)].keys())): # judge if the box should append to annotations if (gt['database'][vid_id]['annotations'][str(seg_ind)][str(obj_ind)]['boxes'][str(frame_ind)]['outside'] == 0 and gt['database'][vid_id]['annotations'][str(seg_ind)][str(obj_ind)]['boxes'][str(frame_ind)]['occluded'] == 0): label = gt['database'][vid_id]['annotations'][str(seg_ind)][str(obj_ind)]['label'] # lemmatize the label lemmatizer = WordNetLemmatizer() label = str(lemmatizer.lemmatize(label, pos=wordnet.NOUN)) bbox = [gt['database'][vid_id]['annotations'][str(seg_ind)][str(obj_ind)]['boxes'][str(frame_ind)]['{}'.format(axis)] for axis in ['xtl', 'ytl', 'xbr', 'ybr']] bbox[0], bbox[2] = 
bbox[0]*eta_x, bbox[2]*eta_x bbox[1], bbox[3] = bbox[1]*eta_y, bbox[3]*eta_y bbox = [int(i) for i in bbox] labels.append(label) bboxes.append(bbox) thrs.append(thr) img_ids.append(img_count) rec['label'] = labels rec['bbox'] = bboxes rec['thr'] = thrs rec['img_ids'] = img_ids img_count += 1 recs.append(rec) # if the segment is not labelled, padded with emtpy recs, but each frame should have one. else: # frame_length: number of frame in the video segment frame_length = seg[1] - seg[0] # get the sampled frame index (if we are evaluation, we do not need to sample, all frames are needed) # two senarios (now I only have the sampled data, so we need the sampled) # 1. iterate over sampled frames # 2. iterate over all frames frame_list = range(frame_length) if sample_num == 0 else np.round(np.linspace(0, frame_length-1, sample_num)).astype('int') # iterate over frame for frame_ind in frame_list: rec = {} rec['label'] = [] rec['bbox'] = [] rec['thr'] = [] rec['img_ids'] = [] recs.append(rec) img_count += 1 # write recs to pkl cache file cache_path = os.path.join('cache', dataset, 'gtbox_{}_sample_{}.pkl'.format(phase, sample_num)) with open(cache_path, 'wb') as f: pickle.dump(recs, f) return recs def phrase_accuracy(recs, dets, class_list): """ evaluate the detection result by phrase assume the frame number in recs are the same with the frame number in dets param: dets: detection result, list of four lists [[img_ids], [obj_labels], [obj_bboxes], [obj_confs]] param: recs: record list of gt dict, each dict for a box, dict['label'], ['bbox'], ['thr'], ['img_idx'] param: class_list: list of all class """ # accuracy calculation method: for each phrase, it will ground the box in the current segmentation. 
img_ids = np.array(dets[0]) obj_labels = np.array(dets[1]) obj_bboxes = np.array(dets[2]) obj_confs = np.array(dets[3]) # sort by img_ids order = np.argsort(img_ids) img_ids = img_ids[order] obj_labels = obj_labels[order] obj_bboxes = obj_bboxes[order] obj_confs = obj_confs[order] gt_img_id = recs[-1]['img_ids'] # store labels, confs, bboxes w.r.t image cell obj_confs = obj_confs[order] num_imgs = np.max(img_ids) + 1 obj_labels_cell = [None] * num_imgs obj_confs_cell = [None] * num_imgs obj_bboxes_cell = [None] * num_imgs start_i = 0 id = img_ids[0] for i in range(0, len(img_ids)): if i == len(img_ids) - 1 or img_ids[i + 1] != id: conf = obj_confs[start_i:i + 1] label = obj_labels[start_i:i + 1] bbox = obj_bboxes[start_i:i + 1, :] sorted_inds = np.argsort(-conf) obj_labels_cell[id] = label[sorted_inds] obj_confs_cell[id] = conf[sorted_inds] obj_bboxes_cell[id] = bbox[sorted_inds, :] if i < len(img_ids) - 1: id = img_ids[i + 1] start_i = i + 1 # calculation accuracy w.r.t box. If the ground of the phrase class is correct, then it is matched. 
match_num = 0 # construct incremental class label # class match list: list of the matched number in each class class_match_count = np.zeros(len(class_list), dtype=int) # class_count: count how many is counted class_count = np.zeros(len(class_list), dtype=int) for img_id in range(num_imgs): rec = recs[img_id] # first loop det obj_dict = {} if obj_bboxes_cell[img_id] is None: continue for obj_label, obj_conf, obj_bbox in zip(obj_labels_cell[img_id], obj_confs_cell[img_id], obj_bboxes_cell[img_id]): # second loop gt for gt_label, gt_bbox, thr, img_idx in zip(rec['label'], rec['bbox'], rec['thr'], rec['img_ids']): # calculate IoU if obj_label != gt_label: continue # now obj_label == gt_label, check if it is the first time to add obj_label to obj_dict if not obj_label in obj_dict.keys(): obj_dict[obj_label] = 0 class_ind = class_list.index(gt_label) class_count[class_ind] += 1 elif obj_dict[obj_label] == 1: # if the object has been matched, continue continue bi = [ np.max((obj_bbox[0], gt_bbox[0])), np.max((obj_bbox[1], gt_bbox[1])), np.min((obj_bbox[2], gt_bbox[2])), np.min((obj_bbox[3], gt_bbox[3])) ] iw = bi[2] - bi[0] + 1 ih = bi[3] - bi[1] + 1 if iw > 0 and ih > 0: # compute overlap as area of intersection / area of union ua = (obj_bbox[2] - obj_bbox[0] + 1.) * (obj_bbox[3] - obj_bbox[1] + 1.) + \ (gt_bbox[2] - gt_bbox[0] + 1.) * \ (gt_bbox[3] - gt_bbox[1] + 1.) - iw*ih ov = iw * ih / ua # makes sure that this object is detected according # to its individual threshold if ov >= thr: match_num += 1 class_match_count[class_ind] += 1 # to prevent one phrase matched multiple gt. 
obj_dict[obj_label] = 1 # class class_accuracy = class_match_count/(class_count + 1e-6) cls_mean_accuracy = np.mean(class_accuracy) mean_accuracy = np.sum(class_match_count)/np.sum(class_count) """ # uncomment to see per category accuracy for p in zip(class_list, class_accuracy): print ('{:10s}{:0.2%}'.format(p[0]+':', p[1])) """ print ('macro query accuracy: {:0.2%}'.format(cls_mean_accuracy)) print ('micro query accuracy: {:0.2%}'.format(mean_accuracy)) return cls_mean_accuracy def box_accuracy(recs, dets, class_list): """ evaluate the detection result by phrase assume the frame number in recs are the same with the frame number in dets param: dets: detection result, list of four lists [[img_ids], [obj_labels], [obj_bboxes], [obj_confs]] param: recs: record list of gt dict, each dict for a box, dict['label'], ['bbox'], ['thr'], ['img_idx'] param: class_list: list of all class """ # accuracy calculation method: for each phrase, it will ground the box in the current segmentation. img_ids = np.array(dets[0]) obj_labels = np.array(dets[1]) obj_bboxes = np.array(dets[2]) obj_confs = np.array(dets[3]) # sort by img_ids order = np.argsort(img_ids) img_ids = img_ids[order] obj_labels = obj_labels[order] obj_bboxes = obj_bboxes[order] obj_confs = obj_confs[order] gt_img_id = recs[-1]['img_ids'] # store labels, confs, bboxes w.r.t image cell obj_confs = obj_confs[order] num_imgs = np.max(img_ids) + 1 obj_labels_cell = [None] * num_imgs obj_confs_cell = [None] * num_imgs obj_bboxes_cell = [None] * num_imgs start_i = 0 id = img_ids[0] for i in range(0, len(img_ids)): if i == len(img_ids) - 1 or img_ids[i + 1] != id: conf = obj_confs[start_i:i + 1] label = obj_labels[start_i:i + 1] bbox = obj_bboxes[start_i:i + 1, :] sorted_inds = np.argsort(-conf) obj_labels_cell[id] = label[sorted_inds] obj_confs_cell[id] = conf[sorted_inds] obj_bboxes_cell[id] = bbox[sorted_inds, :] if i < len(img_ids) - 1: id = img_ids[i + 1] start_i = i + 1 # calculation accuracy w.r.t box. 
If the ground of the phrase class is correct, then it is matched. match_num = 0 # construct incremental class label # class match list: list of the matched number in each class class_match_count = np.zeros(len(class_list), dtype=int) # class_count: count how many is counted class_count = np.zeros(len(class_list), dtype=int) for img_id in range(num_imgs): rec = recs[img_id] # first loop gt for gt_label, gt_bbox, thr, img_idx in zip(rec['label'], rec['bbox'], rec['thr'], rec['img_ids']): class_ind = class_list.index(gt_label) class_count[class_ind] += 1 if obj_bboxes_cell[img_id] is None: continue # second loop dets for obj_label, obj_conf, obj_bbox in zip(obj_labels_cell[img_id], obj_confs_cell[img_id], obj_bboxes_cell[img_id]): # calculate IoU if obj_label != gt_label: continue bi = [ np.max((obj_bbox[0], gt_bbox[0])),
np.max((obj_bbox[1], gt_bbox[1]))
numpy.max
# -*- coding: utf-8 -*- """ Created on Thu May 27 11:53:42 2021 @author: Shubham """ import os, numpy as np import cv2 import random import torch import torch.utils.data as data import xml.etree.ElementTree as ET from abc import ABCMeta, abstractmethod import scipy.cluster.vq as vq import pickle import pandas as pd from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_classif from cv2 import imread, resize from numpy import concatenate from sklearn.metrics import accuracy_score from sklearn.semi_supervised import LabelPropagation from sklearn.model_selection import train_test_split import argparse from imblearn.under_sampling import RandomUnderSampler from skimage import feature import warnings warnings.filterwarnings("ignore") """ Data Loader reading the files, extracting individual objects from each image """ class DataLoader(data.Dataset): def __init__(self,data_path="", trainval='trainval',transform=None): self.data_path = data_path self.transform = transform self.trainval = trainval self.__init_classes() self.names, self.labels, self.lable_set, self.bounding_box = self.__dataset_info() def __getitem__(self, index): self.data = [] self.lables = [] x = imread(self.data_path+'JPEGImages/'+self.names[index]+'.jpg') #x = resize(x, (256,256)) #print(self.bounding_box[index]) x_min, y_min, x_max, y_max = self.bounding_box[index] for i in range(len(x_min)): #print(i) sub_img = x[y_min[i]:y_max[i],x_min[i]:x_max[i]] #print(sub_img.shape) #sub_img = resize(sub_img, (64,64)) sub_img = cv2.resize(sub_img, (64, 64), interpolation=cv2.INTER_NEAREST) self.data.append(sub_img) self.lables.append(self.lable_set[index][i]) #print(self.lable_set[index]) #print(len(self.lable_set[index])) #print(len(self.bounding_box[index])) #x = Image.fromarray(x) if self.transform !=None: x = self.transform(x) y = self.labels[index] #return x, y def __fetchdata__(self): return self.data, self.lables def __len__(self): return len(self.names) def 
__dataset_info(self): #annotation_files = os.listdir(self.data_path+'/Annotations') with open(self.data_path+'ImageSets/Main/'+self.trainval+'.txt') as f: annotations = f.readlines() annotations = [n[:-1] for n in annotations] names = [] labels = [] lable_set = [] bounding_box = [] for af in annotations: filename = os.path.join(self.data_path,'Annotations',af) tree = ET.parse(filename+'.xml') objs = tree.findall('object') num_objs = len(objs) bdg_box = [obj.find('bndbox') for obj in objs] x_min = [int(box.find('xmin').text.lower().strip()) for box in bdg_box] y_min = [int(box.find('ymin').text.lower().strip()) for box in bdg_box] x_max = [int(box.find('xmax').text.lower().strip()) for box in bdg_box] y_max = [int(box.find('ymax').text.lower().strip()) for box in bdg_box] coords = (x_min, y_min, x_max, y_max) boxes_cl = np.zeros((num_objs), dtype=np.int32) temp_lbls = [] for ix, obj in enumerate(objs): cls = self.class_to_ind[obj.find('name').text.lower().strip()] boxes_cl[ix] = cls temp_lbls.append(cls) lbl = np.zeros(self.num_classes) lbl[boxes_cl] = 1 labels.append(lbl) names.append(af) lable_set.append(temp_lbls) bounding_box.append(coords) return np.array(names), np.array(labels).astype(np.float32), lable_set, bounding_box def __init_classes(self): self.classes = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') self.num_classes = len(self.classes) self.class_to_ind = dict(zip(self.classes, range(self.num_classes))) """ local binary pattern """ class LocalBinaryPatterns: def __init__(self, numPoints, radius): # store the number of points and radius self.numPoints = numPoints self.radius = radius def describe(self, image, eps=1e-7): # compute the Local Binary Pattern representation # of the image, and then use the LBP representation # to build the histogram of patterns image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 
lbp = feature.local_binary_pattern(image, self.numPoints, self.radius, method="uniform") (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, self.numPoints + 3), range=(0, self.numPoints + 2)) # normalize the histogram hist = hist.astype("float") hist /= (hist.sum() + eps) # return the histogram of Local Binary Patterns return hist """ color layout descriptor """ class DescriptorComputer: __metaclass__ = ABCMeta @abstractmethod def compute(self, frame): pass class ColorLayoutComputer(DescriptorComputer): def __init__(self): self.rows = 8 self.cols = 8 self.prefix = "CLD" def compute(self, img): averages = np.zeros((self.rows,self.cols,3)) imgH, imgW, _ = img.shape for row in range(self.rows): for col in range(self.cols): row_start = int(imgH/self.rows * row) row_end = int(imgH/self.rows * (row+1)) col_start = int(imgW/self.cols*col) col_end = int(imgW/self.cols*(col+1)) slice1 = img[row_start:row_end, col_start:col_end] #slice1 = img[imgH/self.rows * row: imgH/self.rows * (row+1), imgW/self.cols*col : imgW/self.cols*(col+1)] #print(slice) average_color_per_row = np.mean(slice1, axis=0) average_color = np.mean(average_color_per_row, axis=0) average_color = np.uint8(average_color) averages[row][col][0] = average_color[0] averages[row][col][1] = average_color[1] averages[row][col][2] = average_color[2] icon = cv2.cvtColor(np.array(averages, dtype=np.uint8), cv2.COLOR_BGR2YCR_CB) y, cr, cb = cv2.split(icon) dct_y = cv2.dct(
np.float32(y)
numpy.float32
#Current key bindings: #left/right arrow: change direction (forward/backword, respectively) and make a step in that direction #space bar: start/stop the animation #from Scientific.IO.NetCDF import NetCDFFile as Dataset import numpy as np from mpl_toolkits.mplot3d import Axes3D from threading import Timer from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter import matplotlib.pyplot as plt from netCDF4 import Dataset i=0 surf=None #1: forward; 0:backward direction=1 #timestep simstep=20 #animation flag animating=False stopped=True #User interaction def key_press_handler(event): global direction global animating global stopped already_animating = animating print('press', event.key) if (event.key=='right'): print("right") direction=1 elif (event.key=='left'): print("left") direction=0 elif (event.key==' '): animating=not animating if (not already_animating and stopped): update() def get_data(filename): data = [] print("Reading from " + filename) ds = Dataset(filename, "r", format="NETCDF4") #if (ds.iteration!=it): print("ERROR: requested it: " + str(it) + "; read it: " + str(ds.iteration)) data[:] = ds.variables["data"] ds.close() return data #last=None def update(): #print('press', event.key) global i global fig global surf global last global simstep global direction global animating global stopped #i = i + (direction)*simstep - (1-direction)*simstep i = i + 2*simstep*direction - simstep if (i<=0): i=0 return #create x,y data (size of plate) x = np.arange(0, 100) y = np.arange(0, 100) #last = solver.compute(i, last); data = get_data("../output/data_" + str(i)) #data = last.data print("update i:%i" % i) xx, yy = np.meshgrid(x, y) #print data ax.clear() ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Temperature') #make the plot surf = ax.plot_surface(xx, yy, data, cmap=cm.coolwarm, vmin=0, vmax=20) #source box (cache/simulator) ctxt = "CACHE" ccolor = "green" fcolor = "black" #ax.text(-10, 8, 27, ctxt, 
bbox=dict(facecolor=ccolor, alpha=0.5, boxstyle='round,pad=1'), color=fcolor, fontweight='bold', fontsize=12, verticalalignment='center') ax.set_autoscale_on(False) ax.set_zlim(0,20) #fig.canvas.draw() plt.draw() #keep animating if animating==true if (animating): Timer(0.001, update).start() stopped=False else: stopped=True #now this is useless return (xx, yy, data) #init plot fig = plt.figure() ax = fig.gca(projection='3d') #Labels ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Temperature') ax.zaxis.set_major_locator(LinearLocator(10)) #surf = ax.plot_surface(xx, yy, data) xx,yy,data = update() #install key handlers fig.canvas.mpl_connect('key_press_event', key_press_handler) surf.set_clim(vmin=0, vmax=20) #colorbar fig.colorbar(surf, shrink=0.5, aspect=5)
np.set_printoptions(precision=3)
numpy.set_printoptions
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # MIT License # # Copyright (c) 2019, 2020 MACNICA Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # '''A collection of utility classes for video applications. ''' import sys import queue import threading import cv2 import time import numpy as np import argparse import datetime import logging class VideoAppUtilsError(Exception): pass class VideoAppUtilsEosError(VideoAppUtilsError): pass class VideoAppUtilsDeviceError(VideoAppUtilsError): pass class PipelineWorker(): '''A worker thread for a stage in a software pipeline. This class is an abstruct class. Sub classes inherited from this class should implement a process stage of a software pipeline. 
+------------------+ +------------------+ +------------------+ | PipelineWorker#1 | | PipelineWorker#2 | | PipelineWorker#3 |<-get() | getData()->(Q)-----> process()->(Q)-----> process()->(Q)-----> +------------------+ +------------------+ +------------------+ Attributes: queue: Queue to store outputs processed by this instance. source: Data source (assumped to be other PipelineWorker instance) destination: Data destination (assumped to be other PipelineWorker instance) sem: Semaphore to lock this instance. flag: If ture, the processing loop is running. numDrops: Total number of dropped outputs. thread: Worker thread runs the _run instance method. ''' def __init__(self, qsize, source=None, drop=True): ''' Args: qsize(int): Output queue capacity source(PipelineWorker): Data source. If ommited, derived class should implement the getData method. ''' self.queue = queue.Queue(qsize) self.source = source if self.source is not None: self.source.destination = self self.drop = drop self.destination = None self.sem = threading.Semaphore(1) self.flag = False self.numDrops = 0 self._error = False def __del__(self): pass def __repr__(self): return '%02d %06d' % (self.qsize(), self.numDrops) def process(self, srcData): '''Data processing(producing) method called in thread loop. Derived classes should implement this method. Args: srcData: Source data ''' return (False, None) def getData(self): '''Returns a output to data consumer. 
''' if self.source is None: return None else: return self.source.get() def __run(self): logging.info('%s thread started' % (self.__class__.__name__)) with self.sem: self.flag = True while True: with self.sem: if self.flag == False: break dat = None try: src = self.getData() except VideoAppUtilsEosError: self._error = True logging.info('End of Stream detected') else: try: ret, dat = self.process(src) if ret == False: self._error = True dat = None logging.info('Processing error') except Exception as e: self._error = True dat = None logging.critical(e) if self.drop and self.queue.full(): self.queue.get(block=True) self.numDrops += 1 self.queue.put(dat) logging.info('%s thread terminated' % (self.__class__.__name__)) def clear(self): try: while True: self.queue.get(block=False) except queue.Empty: return def start(self): '''Starts the worker thread. ''' self.thread = threading.Thread(target=self.__run) self.thread.start() def get(self): '''Gets a output. ''' if self._error: logging.info('VideoAppUtilsEosError') raise VideoAppUtilsEosError return None return self.queue.get(block=True) def stop(self): '''Stops the worker thread. ''' with self.sem: self.flag = False self.clear() self.thread.join() def qsize(self): '''Returns the number of the current queued outputs ''' sz = 0 with self.sem: sz = self.queue.qsize() return sz class ContinuousVideoCapture(PipelineWorker): '''Video capture workeer thread ''' GST_STR_CSI = 'nvarguscamerasrc \ ! video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, \ format=(string)NV12, framerate=(fraction)%d/1 \ ! nvvidconv \ ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx \ ! videoconvert \ ! appsink' def __init__(self, cameraId, width, height, \ fps=None, qsize=30, fourcc=None): ''' Args: cameraId(int): Camera device ID, if negative number specified, the CSI camera will be selected. 
width(int): Capture width height(int): Capture height fps(int): Frame rate qsize(int): Capture queue capacity fourcc(str): Capture format FOURCC string ''' super().__init__(qsize) if cameraId < 0: # CSI camera if fps is None: fps = 30 gstCmd = ContinuousVideoCapture.GST_STR_CSI \ % (width, height, fps, width, height) self.capture = cv2.VideoCapture(gstCmd, cv2.CAP_GSTREAMER) if self.capture.isOpened() is False: raise VideoAppUtilsDeviceError( \ 'CSI camera could not be opened.') else: # USB camera # Open the camera device self.capture = cv2.VideoCapture(cameraId,cv2.CAP_V4L2) if self.capture.isOpened() is False: raise VideoAppUtilsDeviceError( \ 'Camera %d could not be opened.' % (cameraId)) # Set the capture parameters self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width) self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height) if fourcc is not None: self.capture.set(cv2.CAP_PROP_FOURCC, \ cv2.VideoWriter_fourcc(*fourcc)) if fps is not None: self.capture.set(cv2.CAP_PROP_FPS, fps) # Get the actual frame size # Not work for OpenCV 4.1 self.width = self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) self.height = self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT) def __del__(self): super().__del__() self.capture.release() def getData(self): ret, frame = self.capture.read() if ret == False: raise VideoAppUtilsEosError return frame def process(self, srcData): return (True, srcData) class VideoDecoder(PipelineWorker): GST_STR_DEC_H264 = 'filesrc location=%s \ ! qtdemux name=demux demux.video_0 \ ! queue \ ! h264parse \ ! omxh264dec \ ! nvvidconv \ ! video/x-raw, format=(string)BGRx \ ! videoconvert \ ! appsink' GST_STR_DEC_H265 = 'filesrc location=%s \ ! qtdemux name=demux demux.video_0 \ ! queue \ ! h265parse \ ! omxh265dec \ ! nvvidconv \ ! video/x-raw, format=(string)BGRx \ ! videoconvert \ ! 
appsink' def __init__(self, file, qsize=30, repeat=False, h265=False): ''' Args: file(str): qsize(int): Capture queue capacity ''' super().__init__(qsize) self.repeat = repeat if h265: self.gstCmd = VideoDecoder.GST_STR_DEC_H265 % (file) else: self.gstCmd = VideoDecoder.GST_STR_DEC_H264 % (file) #self.capture = cv2.VideoCapture(self.gstCmd, cv2.CAP_GSTREAMER) self.capture= cv2.VideoCapture(file) if self.capture.isOpened() == False: raise VideoAppUtilsEosError('%s could not be opened.' % (file)) # Get the frame size self.width = self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) self.height = self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT) self.frames = 0 def __del__(self): super().__del__() self.capture.release() def getData(self): ret, frame = self.capture.read() if ret == False: if self.repeat: # Reopen the video file self.capture.release() self.capture = cv2.VideoCapture(self.gstCmd, cv2.CAP_GSTREAMER) if self.capture.isOpened() == False: raise VideoAppUtilsEosError( \ '%s could not be re-opened.' % (file)) frame = None ret, frame = self.capture.read() if ret == False: raise VideoAppUtilsEosError else: logging.info('End of stream at frame %d' % (self.frames)) raise VideoAppUtilsEosError self.frames += 1 return frame def process(self, srcData): return (True, srcData) class IntervalCounter(): '''A counter to measure the interval between the measure method calls. Attributes: numSamples: Number of samples to calculate the average. samples: Buffer to store the last N intervals. lastTime: Last time stamp count: Total counts ''' def __init__(self, numSamples): ''' Args: numSamples(int): Number of samples to calculate the average. ''' self.numSamples = numSamples self.samples =
np.zeros(self.numSamples)
numpy.zeros
from __future__ import print_function import numpy as np from scipy.linalg import eigh, expm, norm from scipy.sparse import csr_matrix, spmatrix from math import factorial import warnings from functools import reduce try: import qutip except ImportError: qutip = None class Setup(object): sparse = False def __init__(self, H0, Hcs, c_ops=None, loss_vec=None, sparse=False): self.sparse = sparse if c_ops is None: c_ops = [] self.c_ops = c_ops = self.map_from_qobj(c_ops) self.H0 = self.from_qobj(H0) for op in c_ops: self.H0 += -0.5j*op.conj().T.dot(op) dim = self.H0.shape[0] assert self.H0.shape == (dim, dim) self.Hcs = self.map_from_qobj(Hcs) n_ctrls = self.Hcs.shape[0] if not self.sparse: assert self.Hcs.shape == (n_ctrls, dim, dim), self.Hcs.shape self.hermitian = True for H in [self.H0] + list(self.Hcs): if self.sparse: H = H.toarray() if not np.allclose(H, H.conj().T): print('Non-Hermitian hamiltonian detected!') self.hermitian = False break self.loss_vec = loss_vec def from_qobj(self, A, sparse=None): if sparse is None: sparse = self.sparse if qutip is not None and isinstance(A, qutip.Qobj): arr = np.squeeze(A.full()) elif sparse and isinstance(A, spmatrix): return A.tocsr() else: arr = np.asarray(A).copy().astype(complex) if sparse and arr.ndim == 2 and arr.shape[0] == arr.shape[1]: return csr_matrix(arr) return arr def map_from_qobj(self, A, sparse=None): return np.array([self.from_qobj(a, sparse=sparse) for a in A]) def get_fids(self, controls, aux_params, dt): raise NotImplementedError def set_dtype(self, dtype): self.H0 = self.H0.astype(dtype) self.Hcs = [Hc.astype(dtype) for Hc in self.Hcs] class StateTransferSetup(Setup): r"""Optimize a problem of the form .. math:: \max_\epsilon \big|\sum_k \langle \text{final}_k| U(\epsilon) |\text{init}_k\rangle\big| Since the absolute value is taken after the sum, this results in a coherent evolution of the initial states into the final states. 
""" def __init__(self, H0, Hcs, inits, finals, c_ops=None, gauge_ops=None, loss_vec=None, coherent=True, sparse=False, use_taylor=False): self.use_taylor = use_taylor self.taylor_order = 5 if not use_taylor: if sparse: warnings.warn('Exact (non-taylor) method incompatible with sparse matrices, using dense matrices') sparse = False super(StateTransferSetup, self).__init__(H0, Hcs, c_ops=c_ops, loss_vec=loss_vec, sparse=sparse) self.inits = self.map_from_qobj(inits) self.finals = self.map_from_qobj(finals) self.gauge_ops = None self.coherent = coherent if gauge_ops is not None: self.gauge_ops = self.map_from_qobj(gauge_ops, sparse=False) def optimize_taylor_order(self, max_norm, plen, dt, aux_params=None, tol=1e-6): if aux_params is None: aux_params = [] orders = [] for _ in range(3): ctrls = max_norm * np.random.randn(len(self.Hcs), plen) self.taylor_order = 5 prev_psi = self.get_fids(ctrls, aux_params, dt)[0] rel_err = 1 while rel_err > tol: self.taylor_order += 1 psi = self.get_fids(ctrls, aux_params, dt)[0] rel_err = np.sum(np.abs(psi - prev_psi)**2) / np.sum(np.abs(psi)**2) print('Taylor order:', self.taylor_order, 'Rel Err:', rel_err) prev_psi = psi orders.append(self.taylor_order) self.taylor_order = max(orders) print('Using taylor order', self.taylor_order) def __getitem__(self, item): return [self.H0, self.Hcs, self.inits, self.finals, self.gauge_ops][item] def get_fids(self, controls, aux_params, dt): if self.use_taylor: return taylor_states_fidelity( controls, self.H0, self.Hcs, self.inits, self.finals, dt=dt, gauge_vals=aux_params, gauge_ops=self.gauge_ops, hermitian=self.hermitian, coherent=self.coherent, loss_vec=self.loss_vec, order=self.taylor_order ) else: return states_fidelity( controls, self.H0, self.Hcs, self.inits, self.finals, dt=dt, gauge_vals=aux_params, gauge_ops=self.gauge_ops, hermitian=self.hermitian, coherent=self.coherent, loss_vec=self.loss_vec ) def set_dtype(self, dtype): super(StateTransferSetup, self).set_dtype(dtype) self.inits = 
self.inits.astype(dtype) self.finals = self.finals.astype(dtype) if self.gauge_ops is not None: self.gauge_ops = self.gauge_ops.astype(dtype) class UnitarySetup(Setup): r"""Optimize a problem of the form .. math:: \max_\epsilon \big|\text{Tr}[U_\text{target} U(\epsilon)^\dagger]\big| """ def __init__(self, H0, Hcs, U_target, c_ops=None, gauge_ops=None): super(UnitarySetup, self).__init__(H0, Hcs, c_ops=c_ops) self.U_target = self.from_qobj(U_target) self.gauge_ops = None if gauge_ops is not None: self.gauge_ops = self.map_from_qobj(gauge_ops) def __getitem__(self, item): return [self.H0, self.Hcs, self.U_target][item] def get_fids(self, controls, aux_params, dt): return prop_fidelity( controls, self.H0, self.Hcs, self.U_target, aux_params, self.gauge_ops, dt, hermitian=self.hermitian, loss_vec=self.loss_vec ) def set_dtype(self, dtype): super(UnitarySetup, self).set_dtype(dtype) self.U_target = self.U_target.astype(dtype) if self.gauge_ops is not None: self.gauge_ops = self.gauge_ops class ExpectationSetup(Setup): def __init__(self, H0, Hcs, inits, expect_ops, c_ops=None): super(ExpectationSetup, self).__init__(H0, Hcs, c_ops=c_ops) self.inits = self.from_qobj(inits) #map_from_qobj(inits) self.expect_ops = self.from_qobj(expect_ops) #map_from_qobj(expect_ops) def __getitem__(self, item): return [self.H0, self.Hcs, self.inits, self.expect_ops][item] def get_fids(self, controls, aux_params, dt): prop, fid, d_fid = get_expectation(controls, self.H0, self.Hcs, self.inits, self.expect_ops, dt) return prop, fid, d_fid, np.zeros_like(aux_params) def set_dtype(self, dtype): super(ExpectationSetup, self).set_dtype(dtype) self.inits = self.inits.astype(dtype) self.expect_ops = self.expect_ops.astype(dtype) class LindbladSetup(StateTransferSetup): def __init__(self, H0, Hcs, inits, finals, c_ops, loss_vec=None, **kwargs): L0 = self.make_liouvillian(H0) + sum(map(self.make_dissipator, c_ops)) Lcs = np.array(list(map(self.make_liouvillian, Hcs))) inits = 
self.map_from_qobj(inits) finals = self.map_from_qobj(finals) if inits[0].shape[0] != L0.shape[0]: rho_inits = [np.outer(i1, i2.conj()).flatten() for i1 in inits for i2 in inits] rho_finals = [np.outer(f1, f2.conj()).flatten() for f1 in finals for f2 in finals] else: rho_inits = inits rho_finals = finals super(LindbladSetup, self).__init__(L0, Lcs, rho_inits, rho_finals, **kwargs) # self.hermitian = False def get_fids(self, controls, aux_params, dt): prop, fid, d_fid, d_fid_aux = super(LindbladSetup, self).get_fids(controls, aux_params, dt) fid = np.sqrt(fid) d_fid = d_fid / fid d_fid_aux = d_fid_aux / fid return prop, fid, d_fid, d_fid_aux def make_liouvillian(self, H): H = self.from_qobj(H) I = np.eye(H.shape[0]) return (np.kron(I, H) - np.kron(H.T, I)) def make_dissipator(self, c_op): c_op = self.from_qobj(c_op) cd = c_op.T.conj() c = c_op cdc = cd.dot(c) I = np.eye(c_op.shape[0]) return 1j * (np.kron(cd.T, c) - 0.5 * (np.kron(I, cdc) + np.kron(cdc.T, I))) class SubspaceSetup(StateTransferSetup): def get_fids(self, controls, aux_params, dt): assert not self.use_taylor return states_fidelity( controls, self.H0, self.Hcs, self.inits, self.finals, dt=dt, gauge_vals=aux_params, gauge_ops=self.gauge_ops, hermitian=self.hermitian, coherent=False, subspace_contain=True, loss_vec=self.loss_vec ) def states_fidelity(controls, H_drift, H_controls, inits, finals, gauge_vals=None, gauge_ops=None, dt=1, hermitian=True, coherent=True, subspace_contain=False, loss_vec=None): n_ctrls, plen = controls.shape n_states = len(inits) use_gauge = gauge_ops is not None dim = H_drift.shape[0] H_drift = dt * H_drift H_controls = dt * np.array(H_controls) # TODO: Don't re-initialize every time if possible props = np.empty((plen, dim, dim), H_drift.dtype) d_props = np.empty((n_ctrls, plen, dim, dim), H_drift.dtype) for i, time_slice in enumerate(controls.T): H = H_drift + sum(c*Hc for c,Hc in zip(time_slice, H_controls)) if hermitian: props[i], d_props[:, i, :, :] = step_propagator(H, 
H_controls, loss_vec) else: props[i], d_props[:, i, :, :] = step_propagator_nonhermitian(H, H_controls) if use_gauge: g_sum = sum(g_val*g_op for g_val, g_op in zip(gauge_vals, gauge_ops)) g_prop, d_g_props = step_propagator(g_sum, gauge_ops) props = np.concatenate((props, [g_prop])) prop_inits = [inits.T] for prop in props: prop_inits.append(prop.dot(prop_inits[-1])) prop_finals = [finals.conj()] for prop in reversed(props): prop_finals.append(prop_finals[-1].dot(prop)) prop_finals.reverse() if coherent: ovlp = np.sum(prop_finals[-1].T * prop_inits[-1]) fid = abs(ovlp) d_ovlps = [] for i, (pi, pf) in enumerate(zip(prop_inits[:plen], prop_finals[1:])): for d_prop in d_props[:, i]: d_ovlps.append(np.sum(pf.T * d_prop.dot(pi))) d_ovlps = np.array(d_ovlps).reshape((plen, n_ctrls)).T d_fids = (ovlp.real*d_ovlps.real + ovlp.imag*d_ovlps.imag) / (fid) elif subspace_contain: ovlps = prop_finals[-1].dot(prop_inits[-1]) a_ovlps = np.abs(ovlps)**2 fid = np.sum(a_ovlps) d_fids = [] for i, (pi, pf) in enumerate(zip(prop_inits[:plen], prop_finals[1:])): for d_prop in d_props[:, i]: d_ovlp = pf.dot(d_prop.dot(pi)) d_a_ovlps = 2 * (ovlps.real*d_ovlp.real + ovlps.imag*d_ovlp.imag) d_fids.append(np.sum(d_a_ovlps)) d_fids = np.array(d_fids).reshape((plen, n_ctrls)).T else: ovlps = np.sum(prop_finals[-1].T * prop_inits[-1], axis=0) a_ovlps = np.abs(ovlps)**2 fid = np.sum(a_ovlps) d_fids = [] for i, (pi, pf) in enumerate(zip(prop_inits[:plen], prop_finals[1:])): for d_prop in d_props[:, i]: d_ovlp = pf.T * d_prop.dot(pi) d_a_ovlps = 2 * (ovlps.real*d_ovlp.real + ovlps.imag*d_ovlp.imag) d_fids.append(np.sum(d_a_ovlps)) d_fids = np.array(d_fids).reshape((plen, n_ctrls)).T if not use_gauge: return prop_inits[-1], fid / n_states, d_fids / n_states, [] d_g_ovlps = [] pi = prop_inits[-2] pf = prop_finals[-1] for d_prop in d_g_props: d_g_ovlps.append(np.sum(pf.T * d_prop.dot(pi))) d_g_ovlps = np.array(d_g_ovlps) d_g_fids = (ovlp.real*d_g_ovlps.real + ovlp.imag*d_g_ovlps.imag) / (fid) return 
prop_inits[-1], fid / n_states, d_fids / n_states, d_g_fids / n_states def get_expectation(controls, H_drift, H_controls, init, expect_op, dt=1): H_drift = dt * H_drift H_controls = dt * np.array(H_controls) tot_prop, d_tot_props, _ = total_propagator(controls, H_drift, H_controls) final = tot_prop.dot(init) d_finals = np.einsum('ijkl,l->ijk', d_tot_props, init) expect = final.conj().T.dot(expect_op).dot(final).real d_op_finals = np.einsum('ij,klj->kli', expect_op, d_finals) d_expects = 2*np.einsum('i,jki->jk', final.conj(), d_op_finals).real return tot_prop, expect, d_expects def prop_fidelity(controls, H_drift, H_controls, U_target, gauge_vals, gauge_ops, dt=1, hermitian=True, loss_vec=None): """ Get the total propagator as well as the fidelity to a given target defined as abs(Tr(U_target . U.conj().T)) and the gradient of the fidelity with respect to the controls """ H_drift = dt * H_drift H_controls = dt * np.array(H_controls) tot_prop, d_tot_props, d_g_props = total_propagator( controls, H_drift, H_controls, gauge_vals, gauge_ops, hermitian=hermitian, loss_vec=loss_vec ) return prop_fidelity_from_U(tot_prop, d_tot_props, d_g_props, U_target) def prop_fidelity_from_U(U, dUs, d_g_Us, U_target): norm = np.sum(abs(U_target)**2) overlap = np.sum(U_target.conj() * U) / norm d_overlaps = np.sum(U_target.conj() * dUs, axis=(2,3)) / norm fid = abs(overlap) d_fid = (overlap.real*d_overlaps.real + overlap.imag*d_overlaps.imag) / fid if len(d_g_Us) == 0: d_g_fid = [] else: d_g_overlaps = np.sum(U_target.conj() * d_g_Us, axis=(1,2)) / norm d_g_fid = (overlap.real*d_g_overlaps.real + overlap.imag*d_g_overlaps.imag) / fid return U, fid, d_fid, d_g_fid def total_propagator(controls, H_drift, H_controls, gauge_vals=None, gauge_ops=None, hermitian=True, loss_vec=None): """ Compute step propagator for each time point and take product to find the total propagator. Similarly find the derivative of the propagator with respect to the controls. 
:param controls: (N_CTRLS, PLEN) real array :param H_drift: (DIM, DIM) complex array :param H_controls: (N_CTRLS, DIM, DIM) complex array :return: (U_total, [d(U_total)/d(controls)]) """ n_ctrls, plen = controls.shape dim = H_drift.shape[0] use_gauge = gauge_ops is not None props = np.empty((plen, dim, dim), H_drift.dtype) d_props = np.empty((n_ctrls, plen, dim, dim), H_drift.dtype) for i, time_slice in enumerate(controls.T): H = H_drift + sum(c*Hc for c,Hc in zip(time_slice, H_controls)) if hermitian: props[i], d_props[:, i, :, :] = step_propagator(H, H_controls, loss_vec) else: props[i], d_props[:, i, :, :] = step_propagator_nonhermitian(H, H_controls) if use_gauge: g_sum = sum(g_val*g_op for g_val, g_op in zip(gauge_vals, gauge_ops)) g_prop, d_g_props = step_propagator(g_sum, gauge_ops) props = np.concatenate((props, [g_prop])) ahead = [np.identity(dim)] for prop in props[:-1]: ahead.append(prop.dot(ahead[-1])) behind = [np.identity(dim)] for prop in reversed(props[1:]): behind.append(behind[-1].dot(prop)) behind.reverse() tot_prop = props[-1].dot(ahead[-1]) d_tot_props = [list(map(mdot, list(zip(behind, d_props[i], ahead)))) for i in range(n_ctrls)] if not use_gauge: return tot_prop, np.array(d_tot_props), [] d_g_tot_props = [mdot((behind[-1], d_prop, ahead[-1])) for d_prop in d_g_props] return tot_prop, np.array(d_tot_props), np.array(d_g_tot_props) def total_propagator_only(controls, H_drift, H_controls, gauge_vals=None, gauge_ops=None, hermitian=True, loss_vec=None, step_props=False): n_ctrls, plen = controls.shape dim = H_drift.shape[0] use_gauge = gauge_ops is not None props = np.empty((plen, dim, dim), H_drift.dtype) for i, time_slice in enumerate(controls.T): H = H_drift + sum(c*Hc for c,Hc in zip(time_slice, H_controls)) if hermitian: props[i] = step_propagator(H, H_controls, loss_vec, prop_only=True) else: props[i] = step_propagator_nonhermitian(H, H_controls, prop_only=True) if use_gauge: raise NotImplemented # not tested g_sum = sum(g_val*g_op for 
g_val, g_op in zip(gauge_vals, gauge_ops)) g_prop, d_g_props = step_propagator(g_sum, gauge_ops) props = np.concatenate((props, [g_prop])) ahead = [np.identity(dim)] for prop in props[:-1]: ahead.append(prop.dot(ahead[-1])) tot_prop = props[-1].dot(ahead[-1]) if step_props: return tot_prop, props return tot_prop def step_propagator_nonhermitian(A, Bs, n=3, beta=0.1, prop_only=False): d = max(int(np.ceil(np.log2(norm(A)/beta))), 0) X = -1j*A / 2**d Ys = [-1j*B / 2**d for B in Bs] X2 = X / 2 eX2 = expm(X2) eX = eX2.dot(eX2) if prop_only: eA = eX for k in range(d): eA = eA.dot(eA) return eA coef = lambda k: 1.0 / factorial(2*k + 1) deXs = [] for Y in Ys: G = coef(n)*Y for k in reversed(list(range(n))): C1 = G.dot(X2) - X2.dot(G) C2 = C1.dot(X2) - X2.dot(C1) G = coef(k)*Y + C2 deXs.append(eX2.dot(G).dot(eX2)) eA = eX deAs = deXs for k in range(d): deAs = [eA.dot(deA) + deA.dot(eA) for deA in deAs] eA = eA.dot(eA) return eA, deAs def step_propagator(H, dHs, loss_vec=None, prop_only=False): """ Compute e^(-i*H) and (matrix-valued) derivatives in the directions Hc for Hc in Hcs. 
See doi:10.1006/aama.1995.1017, equation (7) :param H: hermitian matrix to take exponential of :param dHs: list of hermitian matrices to take derivatives in the direction of :return: (prop, grads) """ vals, basis = eigh(H) i_vals = -1j*vals basis_hc = basis.conj().T prop = (np.exp(i_vals) * basis).dot(basis_hc) if prop_only: return prop # Loewner matrix G z = -(i_vals.reshape((-1, 1)) - i_vals) z_mask = abs(z) < 1e-8 G = np.zeros_like(z) G[~z_mask] = (np.exp(z[~z_mask]) - 1) / z[~z_mask] G[z_mask] = 1 + z[z_mask] / 2 left = prop.dot(basis) # todo: eliminate this operation by adjusting G d_props = [] for dH in dHs: inner = G * basis_hc.dot(dH.dot(basis)) d_prop = -1j * left.dot(inner.dot(basis_hc)) d_props.append(d_prop) if loss_vec is not None: prop = (loss_vec * prop.T).T d_props = [(loss_vec * d_prop.T).T for d_prop in d_props] return prop, np.array(d_props) def get_unitary(controls, H_drift, H_controls, dt): U = np.eye(H_drift.shape[0]) for i, time_slice in enumerate(controls.T): H = H_drift + sum(c*Hc for c,Hc in zip(time_slice, H_controls)) U = expm(-1j*H*dt).dot(U) return U def mdot(ops): """ Take the dot product of an arbitrary number of terms """ return reduce(np.dot, ops) def taylor_states_fidelity(controls, H0, Hcs, inits, finals, dt=1, gauge_vals=None, gauge_ops=None, hermitian=True, coherent=True, loss_vec=None, order=5): if gauge_ops is not None and len(gauge_ops) == 0: gauge_ops = None if gauge_ops is not None: assert len(gauge_ops) == len(gauge_vals) nctrls, plen = controls.shape n_states, dim = inits.shape if isinstance(H0, np.ndarray): H0_hc = H0.conj().T Hcs_hc = [hc.conj().T for hc in Hcs] else: H0_hc = H0.conj().T.tocsr() Hcs_hc = [hc.conj().T.tocsr() for hc in Hcs] if gauge_ops is not None: g_sum = sum(g_val*g_op for g_val, g_op in zip(gauge_vals, gauge_ops)) g_prop, d_g_props = step_propagator(g_sum, gauge_ops) # Propagate forward, with derivative prop_inits = [inits.T] d_prop_inits = [] for cs in controls.T: L = -1j*dt*(H0 + sum(c*Hc for c, 
Hc in zip(cs, Hcs))) psi = prop_inits[-1].copy() # Next psi is sum over taylor terms psi_k = (L^k)/(k!)psi_0 psi_k = psi d_psis = [0]*len(Hcs) d_psi_ks = [np.zeros_like(psi) for _ in range(len(Hcs))] for k in range(1, order+1): for i, Hc in enumerate(Hcs): d_psi_ks[i] = (L.dot(d_psi_ks[i]) + -1j*dt*Hc.dot(psi_k)) / k d_psis[i] += d_psi_ks[i] psi_k = L.dot(psi_k) / k psi += psi_k if loss_vec is not None: psi = (loss_vec * psi.T).T d_psis = [(loss_vec * d_psi.T).T for d_psi in d_psis] prop_inits.append(psi) d_prop_inits.append(d_psis) if gauge_ops is not None: d_prop_inits.append([dg.dot(prop_inits[-1]) for dg in d_g_props]) prop_inits.append(g_prop.dot(prop_inits[-1])) # Propagate backward, derivative not needed prop_finals = [finals.T] if gauge_ops is not None: prop_finals.append(g_prop.conj().T.dot(prop_finals[-1])) for cs in reversed(controls.T): Lhc = 1j*dt*(H0_hc + sum(c*Hc for c, Hc in zip(cs, Hcs_hc))) psi = prop_finals[-1].copy() if loss_vec is not None: psi = (loss_vec * psi.T).T psi_k = psi for k in range(1, order+1): psi_k = Lhc.dot(psi_k) / k psi += psi_k prop_finals.append(psi) prop_finals.reverse() # Compute fid if coherent: ovlp = np.sum(prop_finals[-1].conj() * prop_inits[-1]) / n_states fid = abs(ovlp) else: ovlps = np.sum(prop_finals[-1].conj() * prop_inits[-1], axis=0) a_ovlps = np.abs(ovlps)**2 fid = np.sum(a_ovlps) / n_states # Check overlaps # for pi, pf in zip(prop_inits, prop_finals): # ovlp2 = np.sum(pf.conj() * pi) / n_states # assert np.allclose(ovlp, ovlp2), (ovlp, ovlp2) # Compute d_fid / d_controls d_fids = [] if coherent: for i, prop_final in enumerate(prop_finals[1:]): for d_prop_init in d_prop_inits[i]: d_ovlp = np.sum(prop_final.conj() * d_prop_init) / n_states d_fids.append((d_ovlp.real * ovlp.real + d_ovlp.imag * ovlp.imag) / fid) else: for i, prop_final in enumerate(prop_finals[1:]): for d_prop_init in d_prop_inits[i]: d_ovlp = prop_final.conj() * d_prop_init d_a_ovlps = 2 * (ovlps.real*d_ovlp.real + ovlps.imag*d_ovlp.imag) 
d_fids.append(np.sum(d_a_ovlps) / n_states) d_g_fids = np.array([]) if gauge_ops is not None: ng = len(gauge_ops) d_g_fids = np.array(d_fids[-ng:]) d_fids = d_fids[:-ng] d_fids = np.array(d_fids).reshape((plen, nctrls)).T return prop_inits[-1], fid, d_fids, d_g_fids if __name__ == '__main__': from .preparations import random_hermitian dim = 5 n_gauge = 3 n_ctrls = 2 plen = 10 idx = 0 _, U_target = eigh(random_hermitian(dim)) H0 = random_hermitian(dim) Hcs = [random_hermitian(dim) for _ in range(n_ctrls)] gauge_ops = [random_hermitian(dim) for _ in range(n_gauge)] gauge_vals = np.random.randn(n_gauge) controls =
np.random.randn(n_ctrls, plen)
numpy.random.randn
### ### Date: 25/11/2021 ### Author: Konrad (Veinar) ### from functools import singledispatchmethod import numpy as np class NeuralNetwork: # Constructor def __init__(self, num_Input, num_Hidden, num_Output, learning_rate=0.1) -> None: # Get values from args (size/shape of NN) self.input_nodes = num_Input self.hidden_nodes = num_Hidden self.output_nodes = num_Output # Randomize weights on layer Input-Hidden self.weights_ih = np.random.default_rng(np.random.randint(1, 100)).random( (self.hidden_nodes, self.input_nodes) ) # self.weights_ih = np.ones((self.hidden_nodes, self.input_nodes)) # Randomize weights in layer Hidden-Output self.weights_ho = np.random.default_rng(np.random.randint(1, 100)).random( (self.output_nodes, self.hidden_nodes) ) # self.weights_ho = np.ones((self.output_nodes, self.hidden_nodes)) # Set BIAS for layers Hidden and Output self.bias_h = np.ones((self.hidden_nodes, 1)) # self.bias_h = np.random.default_rng(np.random.randint(1, 100)).random( # (self.hidden_nodes, 1) # ) self.bias_o = np.ones((self.output_nodes, 1)) # self.bias_o = np.random.default_rng(np.random.randint(1, 100)).random( # (self.output_nodes, 1) # ) self.bias_h *= -1 self.bias_o *= -1 # Declare learning rate self.learning_rate = learning_rate # Set variables for errors per every layer self.hidden_error = None self.output_error = None # Set variables for layers after sigmoid function self.output = None self.hidden = None # Put data into NN def feedforward(self, input): # Make vertical array out of input input = np.array(input) input = np.vstack(input) self.hidden = np.dot(self.weights_ih, input) self.hidden = np.add(self.hidden, self.bias_h) # Activation function for hidden layer self.hidden = self.sigmoid(self.hidden) self.output = np.dot(self.weights_ho, self.hidden) self.output = np.add(self.output, self.bias_o) # Activation function for output layer self.output = self.sigmoid(self.output) return self.output # Activation function def sigmoid(self, x): return 1 / (1 + 
np.exp(-x)) # Devirative for activation function def derivative_sigmoid(self, x): return self.sigmoid(x) * (1 - self.sigmoid(x)) # Simplified diverative for activation function (for use in backpropagation) def calculate_gradient(self, x): return x * (1 - x) # Backpropagation of NN def backpropagation(self, inputs, targets) -> None: # Feed NN self.output = self.feedforward(inputs) # TODO: delete this np.printoptions(suppress=True) # Make vertical matrix out of input input = np.array(inputs) input = np.vstack(input) # Make vertical matrix out of targets target = np.array(targets) target = np.vstack(target) # Calculate output error which is diffrence between target and output # ERROR = TARGET - OUTPUT self.output_error = np.subtract(target, self.output) # OK! [rows = output_num, cols = 1] # Calculate hidden layer errors transposed_weights_ho = np.transpose(self.weights_ho) self.hidden_error = np.dot(transposed_weights_ho, self.output_error) # OK! [rows = hidden_num, cols = 1] # ----------------------------------------------------------------- # Calculate delta to weights in HO layer # ----------------------------------------------------------------- # DeltaHO = LEARN_RATE * output_error * (output * (1 - output)) -dot- hidden^T delta_weights_ho = np.multiply(self.output_error, self.learning_rate) delta_bias_o = self.calculate_gradient(delta_weights_ho) delta_weights_ho = self.calculate_gradient(delta_weights_ho) hidden_transposed = np.transpose(self.hidden) delta_weights_ho = np.dot(delta_weights_ho, hidden_transposed) # OK! 
same size as weights_ho # ----------------------------------------------------------------- # Calculate delta to weights in IH layer # ----------------------------------------------------------------- # DeltaIH = LEARN_RATE * hidden_error * (hidden * (1 - hidden)) -dot- Input^T delta_weights_ih = np.multiply(self.hidden_error, self.learning_rate) delta_bias_h = self.calculate_gradient(delta_weights_ih) delta_weights_ih = self.calculate_gradient(delta_weights_ih) input_transposed =
np.transpose(input)
numpy.transpose
"""This Module contains basic Contextual Multi-Armed Bandit Algorithms.""" import copy import math import random from abc import ABC, abstractmethod import numpy as np from pandas import DataFrame, Series from scipy.stats import norm def sigmoid(x): return 1.0 / (1.0 + np.exp(-x)) class MABInterface(ABC): """Abstract base class for various Multi-Armed Bandit Algorithms.""" @abstractmethod def select_arm(self) -> None: """Decide which arm should be selected.""" pass @abstractmethod def update(self) -> None: """Update the information about the arms.""" pass @abstractmethod def batch_update(self) -> None: """Update the information about the arms.""" pass class LinUCB(MABInterface): """Linear Upper Confidence Bound Algorithm for Contextual Multi-Armed Bandit Problem. References ------- [1] <NAME>, <NAME>, John, and <NAME>.: A contextual-bandit approach to personalized news article recommendation. In Proceedings of the 19th International Conference on World Wide Web, pp. 661–670. ACM, 2010. """ def __init__(self, n_arms: int, feature_dim: int, alpha: float =1.0, warmup: int =15, batch_size: int=0) -> None: """Initialize class. :param n_arms: the number of given arms. :param feature_dim: dimentions of context matrix. :param alpha: the hyper-parameter which represents how often the algorithm explore. :param warmup: how many times the algorithms randomly explore arms at first. :param batch_size: the size of information about rewards given in a update. 
""" self.n_arms = n_arms self.feature_dim = feature_dim self.warmup = warmup self.alpha = alpha self.theta = [copy.deepcopy(np.zeros(self.feature_dim)) for i in np.arange(n_arms)] # d * 1 self.A_inv = [copy.deepcopy(np.matrix(np.identity(self.feature_dim))) for i in np.arange(self.n_arms)] # d * d self.b = [copy.deepcopy(np.matrix(np.zeros(self.feature_dim)).T) for i in np.arange(self.n_arms)] # d * 1 self.data_size = 0 self.batch_size = batch_size self._A_inv = [copy.deepcopy(np.matrix(np.identity(self.feature_dim))) for i in np.arange(self.n_arms)] # d * d self._b = [copy.deepcopy(np.matrix(np.zeros(self.feature_dim)).T) for i in np.arange(self.n_arms)] # d * 1 self.counts = np.zeros(self.n_arms, dtype=int) self.rewards = 0 def select_arm(self, x: np.matrix) -> int: """Decide which arm should be selected. :param x: observed context matrix. :return: index of the selected arm. """ if True in (self.counts < self.warmup): result = np.where(self.counts < self.warmup)[0][0] else: ucb_values = np.zeros(self.n_arms) self.theta = np.concatenate([self.A_inv[i].dot(self.b[i]) for i in np.arange(self.n_arms)], axis=1) # user_dim * n_arms mu_hat = self.theta.T.dot(x) # n_arms * 1 sigma_hat = self.alpha * np.concatenate([np.sqrt(x.T.dot(self.A_inv[i].dot(x))) for i in np.arange(self.n_arms)], axis=0) # n_arms * 1 result = np.argmax(mu_hat + sigma_hat) return result def update(self, x: np.matrix, chosen_arm: int, reward: float) -> None: """Update the information about the arms. :param x: observed context matrix. :param chosen_arm: index of the chosen arm. :param reward: reward from the chosen arm. """ self.counts[chosen_arm] += 1 self.rewards += reward self.A_inv[chosen_arm] -= self.A_inv[chosen_arm].dot(x.dot(x.T.dot(self.A_inv[chosen_arm]))) / (1 + x.T.dot(self.A_inv[chosen_arm].dot(x))) self.b[chosen_arm] += x * reward # d * 1 def batch_update(self, x: np.matrix, chosen_arm: int, reward: float) -> None: """Update the information about the arms with a new batch of data. 
:param x: observed context matrix. :param chosen_arm: index of the chosen arm. :param reward: reward from the chosen arm. """ self.data_size += 1 self.counts[chosen_arm] += 1 self.rewards += reward self._A_inv[chosen_arm] -= self._A_inv[chosen_arm].dot(x.dot(x.T.dot(self._A_inv[chosen_arm]))) / (1 + x.T.dot(self._A_inv[chosen_arm].dot(x))) # d * d self._b[chosen_arm] += x * reward # d * 1 if self.data_size % self.batch_size == 0: self.A_inv = copy.deepcopy(self._A_inv) # d * d self.b = copy.deepcopy(self._b) # d * 1 class HybridLinUCB(MABInterface): """Hybrid Linear Upper Confidence Bound Algorithm for Contextual Multi-Armed Bandit Problem. References ------- [1] <NAME>, <NAME>, Langford, John, and Schapire, <NAME>.: A contextual-bandit approach to personalized news article recommendation. In Proceedings of the 19th International Conference on World Wide Web, pp. 661–670. ACM, 2010. """ def __init__(self, n_arms: int, z_dim: int, x_dim: int, alpha: float =1.0, warmup: int =15, batch_size: int=0) -> None: """Initialize class. :param n_arms: the number of given arms. :param z_dim: dimensions of context matrix which is common to all arms. :param x_dim: dimentions of context matrix which is unique to earch arm. :param alpha: the hyper-parameter which represents how often the algorithm explore. :param warmup: how many times the algorithms randomly explore arms at first. :param batch_size: the size of information about rewards given in a update. 
""" self.n_arms = n_arms self.z_dim = z_dim # k self.x_dim = x_dim # d self.warmup = warmup self.alpha = alpha self.beta = np.zeros(self.z_dim) self.theta = None # d * 1 # matrices which are common to all context self.A_zero = np.matrix(np.identity(self.z_dim)) # k * k self.b_zero = np.matrix(np.zeros(self.z_dim)).T # k * 1 # matrices which are different for each context self.A_inv = [copy.deepcopy(np.matrix(np.identity(self.x_dim))) for i in np.arange(self.n_arms)] self.B = [copy.deepcopy(np.matrix(np.zeros((self.x_dim, self.z_dim)))) for i in range(self.n_arms)] # d * k self.b = [copy.deepcopy(np.matrix(np.zeros(self.x_dim)).T) for i in range(self.n_arms)] # d * 1 self.data_size = 0 self.batch_size = batch_size self._A_zero = np.matrix(np.identity(self.z_dim)) # k * k self._b_zero = np.matrix(np.zeros(self.z_dim)).T # k * 1 self._A_inv = [copy.deepcopy(np.matrix(np.identity(self.x_dim))) for i in range(self.n_arms)] # d * d self._B = [copy.deepcopy(np.matrix(np.zeros((self.x_dim, self.z_dim)))) for i in range(self.n_arms)] # d * k self._b = [copy.deepcopy(np.matrix(np.zeros(self.x_dim)).T) for i in range(self.n_arms)] # d * 1 self.counts = np.zeros(self.n_arms, dtype=int) self.rewards = 0 def select_arm(self, x: np.matrix) -> int: """Decide which arm should be selected. :param x: observed context matrix. :return: index of the selected arm. """ z = x[:][:self.z_dim] x = x[:][self.z_dim:] if True in (self.counts < self.warmup): result = np.where(self.counts < self.warmup)[0][0] else: ucb_values = np.zeros(self.n_arms) self.beta = np.linalg.inv(self.A_zero).dot(self.b_zero) # k * 1 self.theta = [self.A_inv[i].dot(self.b[i] - self.B[i].dot(self.beta)).A.reshape(self.x_dim) for i in np.arange(self.n_arms)] # d * 1 mu_hat = [z.T.dot(self.beta) + x.T.dot(self.theta[i]) for i in np.arange(self.n_arms)] s1 = z.T.dot(np.linalg.inv(self.A_zero)).dot(z).A[0] s2 = - 2 * np.array([z.T.dot(np.linalg.inv(self.A_zero)).dot(self.B[i].T).dot(self.A_inv[i]).dot(x) for i in
np.arange(self.n_arms)
numpy.arange
"""Transform a roidb into a trainable roidb by adding a bunch of metadata.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import datasets import numpy as np from model.utils.config import cfg import PIL import pdb import os from nuscenes import NuScenes from nuscenes.utils.geometry_utils import view_points, BoxVisibility import pickle def prepare_roidb(): """Enrich the imdb's roidb by adding some derived quantities that are useful for training. This function precomputes the maximum overlap, taken over ground-truth boxes, between each ROI and each ground-truth box. The class with maximum overlap is also recorded. """ classes = ('__background__', 'pedestrian', 'barrier', 'trafficcone', 'bicycle', 'bus', 'car', 'construction', 'motorcycle', 'trailer', 'truck') nusc_path = '/data/sets/nuscenes' nusc= NuScenes(version='v1.0-trainval', dataroot = nusc_path, verbose= True) if os.path.exists('lib/roi_data_layer/roidb_nuscenes_mini.pkl'): print("Reading roidb..") pickle_in = open("lib/roi_data_layer/roidb_nuscenes_mini.pkl","rb") roidb = pickle.load(pickle_in) return nusc, roidb else: file_dir = os.path.dirname(os.path.abspath(__file__)) roots = file_dir.split('/')[:-2] root_dir = "" for folder in roots: if folder != "": root_dir = root_dir + "/" + folder PATH = root_dir + '/data/train_mini.txt' with open(PATH) as f: image_token = [x.strip() for x in f.readlines()] roidb = [] print("Loading roidb...") for i in range(len(image_token)): im_token = image_token[i] sample_data = nusc.get('sample_data', im_token) image_name = sample_data['filename'] image_path = nusc_path + '/' + image_name data_path, boxes, camera_intrinsic = nusc.get_sample_data(im_token, box_vis_level=BoxVisibility.ALL) gt_boxes = [] gt_cls = [] for box in boxes: visibility_token = nusc.get('sample_annotation', box.token)['visibility_token'] vis_level = int(nusc.get('visibility', visibility_token)['token']) if (vis_level == 3) or (vis_level == 
4): visible = True else: visible = False if visible == True: if box.name.split('.')[0] == 'vehicle': if box.name.split('.')[1] != 'emergency': name = box.name.split('.')[1] else: name = '' elif box.name.split('.')[0] == 'human': name = 'pedestrian' elif box.name.split('.')[0] == 'movable_object': if box.name.split('.')[1] != 'debris' and box.name.split('.')[1] != 'pushable_pullable': name = box.name.split('.')[1] else: name = '' else: name = '' if name != '': corners= view_points(box.corners(), view=camera_intrinsic, normalize=True)[:2,:] box = np.zeros(4) box[0]=
np.min(corners[0])
numpy.min
#! /usr/bin/env python3 # -*- coding: utf-8 -*- import sys import numpy as np import blosum as bl from random import randint def load_pssm(namefile, aa) : """ Lecture d'un fichier .aamtx et renvoie matrice PSSM """ with open (namefile, 'r') as f : for line in f : if line[0] == '>' : name = line[1:-1] elif line[0] in aa : seq = line[:-1] pssm = [] else : pssm.append([float(j) for j in line[:-1].split(' ') if j]) return(name, list(seq), pssm) def calc_score(vect1, vect2) : """ Calcule du score d'alignement entre 2 aa pour une position donnée """ s = 0 n1, n2 = len(vect1), len(vect2) for i in range(n1) : for j in range(n2) : s += vect1[i]*vect2[j] return(s) def calc_gap_optimal(nq, nt, pssmQ, pssmT, blosum): """ Fonction qui calcule et renvoie les pénalités d'ouverture (po) et d'extension de gap (pe) optimales """ gaps = np.arange(0.1, 10, 0.1) A = [] scores = [] blsmean = blosum.stack().mean() blsstd = blosum.stack().std() for gap in gaps: A.append((gap - blsmean) / blsstd) # Scores - random PSSM for i in range(nq): for j in range(nt): ri, rj = randint(0, nq-1), randint(0, nt-1) #sélection des indices de la pssm de manière aléatoires scores.append(calc_score(pssmQ[ri], pssmT[rj])) smean = np.array(scores).mean() sstd = np.array(scores).std() po = [] pe = [] for i in range(len(A)): po.append((A[i] * sstd) + smean) pe.append(po[i] * 10 / 100) return(-np.array(po).mean(), -np.array(pe).mean()) def init_matInsert(i, j, po): """ Initialisation matrice des ajouts de gaps (au niveau de la Query (Q) ou au niveau de la template (T)) entre aaQ (en position i) et aaT (en position j) """ if i > 0: return -np.inf else: if j > 0 and i == 0: return po*j else: return 0 def init_matMatch(i, j): """ Initialisation matrice des matchs (M) entre aaQ (en position i) et aaT (en position j) """ if j == 0 and i == 0: return 0 else: if j == 0 or i == 0: return -np.inf else: return 0 def scores_propositions(i, j, M, Q, T, pssmQ, pssmT, p, type_score): """ Calcul des propositions de scores 
d'alignement aaQ - aaT """ propositions = [] if type_score == 'M': score = calc_score(pssmQ[i-1], pssmT[j-1]) propositions.append(score + M[i-1,j-1]) propositions.append(Q[i,j]) propositions.append(T[i,j]) elif type_score == 'Q': propositions.append(M[i,j-1] + p) propositions.append(Q[i,j-1] + p) propositions.append(T[i,j-1] + p) elif type_score == 'T': propositions.append(M[i-1,j] + p) propositions.append(Q[i-1,j] + p) propositions.append(T[i-1,j] + p) return(propositions) def crea_matrix(fileQ, fileT): """ Création et initialisation des matrices des matchs (M) et des insertions de gaps (Q et T) (Méthode affine) """ aa, blosum = bl.load_blosum("bin/salut_1.0/data/BLOSUM62.txt") nameQ, seqQ, pssmQ = load_pssm(fileQ, aa) nameT, seqT, pssmT = load_pssm(fileT, aa) if len(seqQ) != len(pssmQ): print(">>> ERREUR : La longueur de la séquence QUERY est différente du nombre de lignes lues dans la PSSM QUERY\n") sys.exit(1) elif len(seqT) != len(pssmT): print(">>> ERREUR : La longueur de la séquence TEMPLATE est différente du nombre de lignes lues dans la PSSM TEMPLATE\n") sys.exit(1) nq, nt = len(seqQ)+1, len(seqT)+1 po, pe = calc_gap_optimal(len(seqQ), len(seqT), pssmQ, pssmT, blosum) M =
np.zeros((nq, nt))
numpy.zeros
# -*- coding: utf-8 -*- import time from utils import letterbox_image,exp,minAreaLine,draw_lines,minAreaRectBox,draw_boxes,line_to_line,sqrt,rotate_bound,timer,is_in from line_split import line_split import numpy as np import cv2 from PIL import Image from skimage import measure import json # crnn from crnn.crnn_torch import crnnOcr, crnnOcr2 tableNetPath = 'UNet/table.weights' SIZE = 512,512 tableNet = cv2.dnn.readNetFromDarknet(tableNetPath.replace('.weights','.cfg'),tableNetPath) def dnn_table_predict(img,prob=0.5): imgResize,fx,fy,dx,dy = letterbox_image(img,SIZE) imgResize = np.array(imgResize) imgW,imgH = SIZE image = cv2.dnn.blobFromImage(imgResize,1,size=(imgW,imgH),swapRB=False) image = np.array(image)/255 tableNet.setInput(image) out=tableNet.forward() out = exp(out[0]) # shape(2,512,512) , 2指的是横纵线两个类对应的map out = out[:,dy:,dx:] # 虽然左上点对上了,但是右方或下方的padding没去掉? return out,fx,fy,dx,dy def get_seg_table(img,prob,row=10,col=10): out,fx,fy,dx,dy = dnn_table_predict(img,prob) rows = out[0] cols = out[1] labels=measure.label(cols>prob,connectivity=2) regions = measure.regionprops(labels) ColsLines = [minAreaLine(line.coords) for line in regions if line.bbox[2]-line.bbox[0]>col ] # if debug: # cv2.imwrite('_cols.jpg',labels*255) labels=measure.label(rows>prob,connectivity=2) regions = measure.regionprops(labels) RowsLines = [minAreaLine(line.coords) for line in regions if line.bbox[3]-line.bbox[1]>row ] # RowsLines[0] = [xmin,ymin,xmax,ymax]注x指横向上,y指纵向上 # if debug: # cv2.imwrite('_rows.jpg',labels*255) imgW,imgH = SIZE tmp =np.zeros((imgH-2*dy,imgW-2*dx),dtype='uint8') tmp = draw_lines(tmp,ColsLines+RowsLines,color=255, lineW=1) # 闭运算:先膨胀后腐蚀,用来连接被误分为许多小块的对象 kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3)) tmp = cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, kernel,iterations=1) seg_table = cv2.resize(tmp,None,fx=1.0/fx,fy=1.0/fy,interpolation=cv2.INTER_CUBIC) degree = 0.0 if len(RowsLines) >= 3: degree = np.array([np.arctan2(bbox[3]-bbox[1],bbox[2]-bbox[0]) for 
bbox in RowsLines]) degree = np.mean(-degree*180.0/np.pi) return seg_table,degree def find_tables(img_seg): # from the seg image, detect big bounding box and decide how many tables in the picture tables = [] h,w = img_seg.shape _,contours, hierarchy = cv2.findContours(img_seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for contour in contours: table_flag = True contourArea = cv2.contourArea(contour) if contourArea < h * w * 0.05: table_flag = False if not table_flag: continue contour = contour.reshape((-1, 2)) xmin,ymin = np.min(contour,axis=0) xmax,ymax = np.max(contour,axis=0) tables.append([xmin,ymin,xmax,ymax]) tables = sorted(tables,key=lambda x : x[1]) return np.array(tables) def find_cells(img_seg,tables): if not len(tables): return [] h,w = img_seg.shape tabelLabels=measure.label(img_seg==0,connectivity=2) regions=measure.regionprops(tabelLabels) rboxes= [] for table in tables: tmp = [] for i,region in enumerate(regions): if h*w*0.0001 < region.bbox_area <h*w*0.5: rbox = np.array(map(int,region.bbox))[[1,0,3,2]] if is_in(rbox,table): tmp.append(rbox) rboxes.append(np.array(tmp)) return np.array(rboxes) def annotate_cell(img,cells): # now cells is a ndarray with shape (n,4) res = np.array([{'text':''} for cell in cells]) # start col sc = 0 idx = cells[:, 0].argsort() cells = cells[idx] res = res[idx] eps = np.diff(cells,axis=0)[:,0] mean = np.mean(eps) breakpoints = np.where(eps >= mean)[0] for i,item in enumerate(res): item['start_col'] = sc if i in breakpoints: sc += 1 # end col ec = 0 idx = cells[:, 2].argsort() cells = cells[idx] res = res[idx] eps = np.diff(cells,axis=0)[:,2] #print(eps) mean = np.mean(eps) breakpoints = np.where(eps >= mean)[0] for i,item in enumerate(res): item['end_col'] = ec if i in breakpoints: ec += 1 # start row sr = 0 idx = cells[:, 1].argsort() cells = cells[idx] res = res[idx] eps = np.diff(cells,axis=0)[:,1] mean = np.mean(eps) breakpoints = np.where(eps >= mean)[0] for i,item in enumerate(res): item['start_row'] = sr if i 
in breakpoints: sr += 1 # end row er = 0 idx = cells[:, 3].argsort() cells = cells[idx] res = res[idx] eps = np.diff(cells,axis=0)[:,3] mean = np.mean(eps) breakpoints = np.where(eps >= mean)[0] for i,item in enumerate(res): item['end_row'] = er if i in breakpoints: er += 1 batch_list_text = [] for i,([xmin,ymin,xmax,ymax],info) in enumerate(zip(cells,res)): lines = line_split(img[ymin:ymax,xmin:xmax],y=ymin,x=xmin) for [_xmin,_ymin,_xmax,_ymax] in lines: #cv2.imwrite('./part/'+str(i)+'_'+str(_ymax)+'.jpg',img[_ymin:_ymax,_xmin:_xmax]) partImg = img[_ymin:_ymax,_xmin:_xmax] partImg = Image.fromarray(partImg).convert('L') batch_list_text.append((i, partImg.convert('L'))) try: i_value, batch_text = crnnOcr2(batch_list_text) except: print("!"*20) print('CUDA OUT OF MEMORY, SPLIT BATCH') print("!"*20) pt = int(len(batch_list_text)/4) i_value1, batch_text1 = crnnOcr2(batch_list_text[:pt]) i_value2, batch_text2 = crnnOcr2(batch_list_text[pt:2*pt]) i_value3, batch_text3 = crnnOcr2(batch_list_text[2*pt:3*pt]) i_value4, batch_text4 = crnnOcr2(batch_list_text[3*pt:]) i_value = i_value1 + i_value2 + i_value3 + i_value4 batch_text = batch_text1 + batch_text2 + batch_text3 + batch_text4 for i,text in zip(i_value,batch_text): res[i]['text'] += text.encode("UTF-8")+ '\n' res = res.tolist() res = sorted(res,key=lambda x: (x['start_row'], x['start_col'])) return res,er+1,ec+1 def find_text(tables,w,h): #find the non-table area for PSENet detection if not len(tables): return np.array([[0,0,w,h]]) Y1 = tables[:,[1,3]] Y2 = [] for i in range(len(Y1)): if i+1 == len(Y1): Y2.append(Y1[i]) break if Y1[i][1] >= Y1[i+1][0]: # ymax1 >= ymin2 Y1[i+1][0] = Y1[i][0] Y1[i+1][1] = max(Y1[i][1],Y1[i+1][1]) continue else: Y2.append(Y1[i]) Y2 = np.array(Y2).reshape(-1,) Y2 =
np.append(0,Y2)
numpy.append
"""Test CrabNet's fit and predict via `get_model()` and `predict()`.""" import numpy as np from crabnet.utils.data import get_data from crabnet.data.materials_data import elasticity from crabnet.crabnet_ import CrabNet import pandas as pd from matbench.bench import MatbenchBenchmark from time import time train_df, val_df = get_data(elasticity, dummy=True) def test_crabnet(): cb = CrabNet( compute_device="cpu", verbose=True, losscurve=False, learningcurve=False, epochs=40, ) cb.fit(train_df) train_pred, train_sigma = cb.predict(val_df, return_uncertainty=True) return train_pred, train_sigma def test_crabnet_300_epochs(): cb = CrabNet( compute_device="cpu", verbose=True, losscurve=False, learningcurve=False, epochs=300, ) cb.fit(train_df) train_pred, train_sigma = cb.predict(val_df, return_uncertainty=True) return train_pred, train_sigma def test_extend_features(): train_df["state_var0"] = np.random.rand(train_df.shape[0]) val_df["state_var0"] = np.random.rand(val_df.shape[0]) cb = CrabNet(verbose=True, extend_features=["state_var0"], epochs=40) cb.fit(train_df) train_pred, train_sigma = cb.predict(val_df, return_uncertainty=True) return train_pred, train_sigma def test_matbench_expt_gap(): t0 = time() mb = MatbenchBenchmark(autoload=False, subset=["matbench_expt_gap"]) for task in mb.tasks: task.load() for fold in [task.folds[0]]: # Inputs are either chemical compositions as strings # or crystal structures as pymatgen.Structure objects. # Outputs are either floats (regression tasks) or bools (classification tasks) train_inputs, train_outputs = task.get_train_and_val_data(fold) train_df = pd.DataFrame({"formula": train_inputs, "target": train_outputs}) # Get testing data test_inputs = task.get_test_data(fold, include_target=False) test_df = pd.DataFrame( {"formula": test_inputs, "target": np.zeros(test_inputs.shape[0])} ) # adjust parameters to have it run quickly on CPU crab = CrabNet( epochs=80, d_model=64, batch_size=256, heads=2, out_hidden=list(
np.array([1024, 512, 256, 128])
numpy.array
# Copyrighrt 2020, by the California Institute of Technology. # ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged. # Any commercial use must be negotiated with the Office of Technology Transfer at the California Institute of Technology. # This software may be subject to U.S. export control laws. # By accepting this software, the user agrees to comply with all applicable U.S. export laws and regulations. # User has the responsibility to obtain export licenses, or other export authority as may be required before exporting # such information to foreign countries or providing access to foreign persons. # Codes last tested 05 April 2020 by MW and IF import uuid import urllib3 import os import xarray as xr import numpy as np import netCDF4 as nc4 import imp import datetime import swath_references as ref from bs4 import BeautifulSoup ######################################################################################################################## # these are some tools to help generate the metadata ######################################################################################################################## #step 1: read in the variables from the regridded file def read_regridded_swath(filePath): print(' Opening ' + str(filePath)) if filePath.exists(): data = xr.open_dataset(str(filePath)) else: print(' regridded swath file not found! aborting!') exit() variables = [] variableNames = [] coordinates = [] coordinateNames = [] for data_var in data.data_vars.keys(): if np.size(
np.array(data[data_var])
numpy.array
# Practice sites
# https://www.machinelearningplus.com/python/101-numpy-exercises-python/
# http://www.cs.umd.edu/~nayeem/courses/MSML605/files/04_Lec4_List_Numpy.pdf
# https://www.gormanalysis.com/blog/python-numpy-for-your-grandma/
# https://nickmccullum.com/advanced-python/numpy-indexing-assignment/

##? 1. Import numpy as np and see the version
# Difficulty Level: L1
# Q. Import numpy as np and print the version number.
import numpy as np
print(np.__version__)

##? 2. How to create a 1D array?
# Difficulty Level: L1
# Q. Create a 1D array of numbers from 0 to 9
arr = np.arange(10)
arr

##? 3. How to create a boolean array?
# Difficulty Level: L1
# Q. Create a 3x3 numpy array of all True's
arr = np.full((3, 3), True, dtype=bool)
arr

##? 4. How to extract items that satisfy a given condition from 1D array?
# Difficulty Level: L1
# Q. Extract all odd numbers from arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1]

##? 5. How to replace items that satisfy a condition with another value in numpy array?
# Difficulty Level: L1
# Q. Replace all odd numbers in arr with -1
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1] = -1
arr

##? 6. How to replace items that satisfy a condition without affecting the original array?
# Difficulty Level: L2
# Q. Replace all odd numbers in arr with -1 without changing arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# 1. np.where
out = np.where(arr % 2 == 1, -1, arr)
out
# 2. list comprehension
out = np.array([-1 if x % 2 == 1 else x for x in arr])
out

##? 7. How to reshape an array?
# Difficulty Level: L1
# Q. Convert a 1D array to a 2D array with 2 rows
arr = np.arange(10)
arr.reshape(2, -1)
# Setting y to -1 automatically decides number of columns.
# Could do the same with arr.reshape(2, 5)

##? 8. How to stack two arrays vertically?
# Difficulty Level: L2
# Q. Stack arrays a and b vertically
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)
# 1
np.vstack([a, b])
# 2
np.concatenate([a, b], axis=0)
# 3
np.r_[a, b]

##? 9. How to stack two arrays horizontally?
# Difficulty Level: L2
# Q. Stack the arrays a and b horizontally.
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)
# 1
np.hstack([a, b])
# 2
np.concatenate([a, b], axis=1)
# 3
np.c_[a, b]

##? 10. How to generate custom sequences in numpy without hardcoding?
# Difficulty Level: L2
# Q. Create the following pattern without hardcoding.
# Use only numpy functions and the below input array a.
a = np.array([1, 2, 3])
np.r_[np.repeat(a, 3), np.tile(a, 3)]

##? 11. How to get the common items between two python numpy arrays?
# Difficulty Level: L2
# Q. Get the common items between a and b
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
np.intersect1d(a, b)

##? 12. How to remove from one array those items that exist in another?
# Difficulty Level: L2
# Q. From array a remove all items present in array b
a = np.array([1, 2, 3, 4, 5])
b = np.array([5, 6, 7, 8, 9])
# From 'a' remove all of 'b'
np.setdiff1d(a, b)

##? 13. How to get the positions where elements of two arrays match?
# Difficulty Level: L2
# Q. Get the positions where elements of a and b match
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
np.where(a == b)

##? 14. How to extract all numbers between a given range from a numpy array?
# Difficulty Level: L2
# Q. Get all items between 5 and 10 from a.
a = np.array([2, 6, 1, 9, 10, 3, 27])
# 1
idx = np.where((a >= 5) & (a <= 10))
a[idx]
# 2
idx = np.where(np.logical_and(a >= 5, a <= 10))
a[idx]
# 3
a[(a >= 5) & (a <= 10)]

##? 15. How to make a python function that handles scalars to work on numpy arrays?
# Difficulty Level: L2
# Q. Convert the function maxx that works on two scalars, to work on two arrays.
def maxx(x: np.array, y: np.array):
    """Get the maximum of two items"""
    if x >= y:
        return x
    else:
        return y

a = np.array([5, 7, 9, 8, 6, 4, 5])
b = np.array([6, 3, 4, 8, 9, 7, 1])
# np.vectorize lifts the scalar function to element-wise operation on arrays.
pair_max = np.vectorize(maxx, otypes=[float])
pair_max(a, b)

##? 16. How to swap two columns in a 2d numpy array?
# Difficulty Level: L2
# Q. Swap columns 1 and 2 in the array arr.
arr = np.arange(9).reshape(3, 3)
arr
arr[:, [1, 0, 2]]
# by putting brackets inside the column slice. You have access to column indices

##? 17. How to swap two rows in a 2d numpy array?
# Difficulty Level: L2
# Q. Swap rows 1 and 2 in the array arr:
arr = np.arange(9).reshape(3, 3)
arr
arr[[0, 2, 1], :]
# same goes here for the rows

##? 18. How to reverse the rows of a 2D array?
# Difficulty Level: L2
# Q. Reverse the rows of a 2D array arr.
# Input
arr = np.arange(9).reshape(3, 3)
arr
arr[::-1, :]
# or
arr[::-1]

##? 19. How to reverse the columns of a 2D array?
# Difficulty Level: L2
# Q. Reverse the columns of a 2D array arr.
# Input
arr = np.arange(9).reshape(3, 3)
arr
arr[:, ::-1]

##? 20. How to create a 2D array containing random floats between 5 and 10?
# Difficulty Level: L2
# Q. Create a 2D array of shape 5x3 to contain random decimal numbers between 5 and 10.
arr = np.arange(9).reshape(3, 3)
# 1
rand_arr = np.random.randint(low=5, high=10, size=(5, 3)) + np.random.random((5, 3))
rand_arr
# 2
rand_arr = np.random.uniform(5, 10, size=(5, 3))
rand_arr

##? 21. How to print only 3 decimal places in python numpy array?
# Difficulty Level: L1
# Q. Print or show only 3 decimal places of the numpy array rand_arr.
rand_arr = np.random.random((5, 3))
rand_arr
rand_arr = np.random.random([5, 3])
np.set_printoptions(precision=3)
rand_arr[:4]

##? 22. How to pretty print a numpy array by suppressing the scientific notation (like 1e10)?
# Difficulty Level: L1
# Q.
# Q. Pretty print rand_arr by suppressing the scientific notation (like 1e10)
# Reset printoptions
np.set_printoptions(suppress=False)
# Create the random array
np.random.seed(100)
rand_arr = np.random.random([3, 3]) / 1e3
rand_arr
# Set precision and suppress e notation
np.set_printoptions(suppress=True, precision=6)
rand_arr

##? 23. How to limit the number of items printed in output of numpy array?
# Difficulty Level: L1
# Q. Limit the number of items printed in python numpy array a to a maximum of 6 elements.
a = np.arange(15)
# set the elements to print in threshold
np.set_printoptions(threshold=6)
a
# reset the threshold to default
np.set_printoptions(threshold=1000)

##? 24. How to print the full numpy array without truncating
# Difficulty Level: L1
# Q. Print the full numpy array a without truncating.
a = np.arange(15)
# reset the threshold to default
np.set_printoptions(threshold=1000)
a

##? 25. How to import a dataset with numbers and texts keeping the text intact in python numpy?
# Difficulty Level: L2
# Q. Import the iris dataset keeping the text intact.
# NOTE(review): these exercises download the iris dataset over the network.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype="object")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
iris[:3]

##? 26. How to extract a particular column from 1D array of tuples?
# Difficulty Level: L2
# Q. Extract the text column species from the 1D iris imported in previous question.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="UTF-8")
species = np.array([col[4] for col in iris_1d])
species[:5]

##? 27. How to convert a 1d array of tuples to a 2d numpy array?
# Difficulty Level: L2
# Q. Convert the 1D iris to 2D array iris_2d by omitting the species text field.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="UTF-8")
# 1
no_species_2d = np.array([row.tolist()[:4] for row in iris_1d])
no_species_2d[:3]
# 2
# Can directly specify columns to use with the "usecols" method
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
no_species_2d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="UTF-8",
                              usecols=[0, 1, 2, 3])
no_species_2d[:3]

##? 28. How to compute the mean, median, standard deviation of a numpy array?
# Difficulty: L1
# Q. Find the mean, median, standard deviation of iris's sepallength (1st column)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
# or
sepal = np.array([col[0] for col in iris_1d])
# or
sepal = np.array([col.tolist()[0] for col in iris_1d])
mu, med, sd = np.mean(sepal), np.median(sepal), np.std(sepal)
np.set_printoptions(precision=2)
print(f'The mean is {mu} \nThe median is {med} \nThe standard deviation is {sd}')

##? 29. How to normalize an array so the values range exactly between 0 and 1?
# Difficulty: L2
# Q. Create a normalized form of iris's sepallength whose values range exactly
# between 0 and 1 so that the minimum has value 0 and maximum has value 1.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
# 1
smax, smin = np.max(sepal), np.min(sepal)
S = (sepal - smin) / (smax - smin)
S
# 2
S = (sepal - smin) / sepal.ptp()
S

##? 30. How to compute the softmax score?
# Difficulty Level: L3
# Q. Compute the softmax score of sepallength.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0], encoding="utf-8")
# or
sepal = np.genfromtxt(url, delimiter=',', dtype='object')
sepal = np.array([float(row[0]) for row in sepal])
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
# 1
def softmax(x):
    # Subtracting the max keeps np.exp from overflowing; result is unchanged.
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

softmax(sepal)

##? 31. How to find the percentile scores of a numpy array?
# Difficulty Level: L1
# Q. Find the 5th and 95th percentile of iris's sepallength
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])
np.percentile(sepal, q=[5, 95])

##? 32. How to insert values at random positions in an array?
# Difficulty Level: L2
# Q. Insert np.nan values at 20 random positions in iris_2d dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', encoding="utf-8")
# Can change object to float if you want
# 1
i, j = np.where(iris_2d)
# i, j contain the row numbers and column numbers of the 600 elements of Irix_x
np.random.seed(100)
iris_2d[np.random.choice(i, 20), np.random.choice((j), 20)] = np.nan
# Checking nans in 2nd column
np.isnan(iris_2d[:, 1]).sum()
# Looking over all rows/columns
np.isnan(iris_2d[:, :]).sum()
# 2
np.random.seed(100)
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
# Looking over all rows/columns
np.isnan(iris_2d[:, :]).sum()

##? 33. How to find the position of missing values in numpy array?
# Difficulty Level: L2
# Q. Find the number and position of missing values in iris_2d's sepallength (1st column)
# ehh already did that? Lol. Using above filtered array from method 2 in question 32
np.isnan(iris_2d[:, 0]).sum()
# Indexes of which can be found with
np.where(np.isnan(iris_2d[:, 0]))

##? 34. How to filter a numpy array based on two or more conditions?
# Difficulty Level: L3
# Q. Filter the rows of iris_2d that has petallength (3rd column) > 1.5
# and sepallength (1st column) < 5.0
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
filt_cond = (iris_2d[:, 0] < 5.0) & (iris_2d[:, 2] > 1.5)
iris_2d[filt_cond]

##? 35. How to drop rows that contain a missing value from a numpy array?
# Difficulty Level: L3:
# Q. Select the rows of iris_2d that does not have any nan value.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
# 1
# No direct numpy implementation
iris_drop = np.array([~np.any(np.isnan(row)) for row in iris_2d])
# Look at first 5 rows of drop
iris_2d[iris_drop][:5]
# 2
iris_2d[np.sum(np.isnan(iris_2d), axis=1) == 0][:5]

##? 36. How to find the correlation between two columns of a numpy array?
# Difficulty Level: L2
# Q. Find the correlation between SepalLength(1st column) and PetalLength(3rd column) in iris_2d
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
# 1
np.corrcoef(iris_2d[:, 0], iris_2d[:, 2])[0, 1]
# 2
# FIX(review): the old 'scipy.stats.stats' module path was deprecated and has
# been removed from SciPy; import from scipy.stats instead.
from scipy.stats import pearsonr
corr, p_val = pearsonr(iris_2d[:, 0], iris_2d[:, 2])
print(corr)
# Correlation coef indicates the degree of linear relationship between two numeric variables.
# It can range between -1 to +1.
# The p-value roughly indicates the probability of an uncorrelated system producing
# datasets that have a correlation at least as extreme as the one computed.
# The lower the p-value (<0.01), greater is the significance of the relationship.
# It is not an indicator of the strength.
#> 0.871754157305

##? 37. How to find if a given array has any null values?
# Difficulty Level: L2
# Q. Find out if iris_2d has any missing values.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
np.isnan(iris_2d[:, :]).any()

##? 38. How to replace all missing values with 0 in a numpy array?
# Difficulty Level: L2
# Q. Replace all occurrences of nan with 0 in numpy array
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
# Check for nans
np.any(~np.isnan(iris_2d[:, :]))
# Set indexes of the nans = 0
iris_2d[np.isnan(iris_2d)] = 0
# Check the same indexes
np.where(iris_2d == 0)
# Check first 10 rows
iris_2d[:10]

##? 39. How to find the count of unique values in a numpy array?
# Difficulty Level: L2
# Q. Find the unique values and the count of unique values in iris's species
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object', encoding="utf-8")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
# 1
species = np.array([row.tolist()[4] for row in iris])
np.unique(species, return_counts=True)
# 2
np.unique(iris[:, 4], return_counts=True)

##? 40. How to convert a numeric to a categorical (text) array?
# Difficulty Level: L2
# Q. Bin the petal length (3rd) column of iris_2d to form a text array, such that if petal length is:
# Less than 3 --> 'small'
# 3-5 --> 'medium'
# '>=5 --> 'large'
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
# 1
# Bin the petal length
petal_length_bin = np.digitize(iris[:, 2].astype('float'), [0, 3, 5, 10])
# Map it to respective category.
label_map = {1: 'small', 2: 'medium', 3: 'large', 4: np.nan}
petal_length_cat = [label_map[x] for x in petal_length_bin]
petal_length_cat[:4]
# or
petal_length_cat = np.array(list(map(lambda x: label_map[x], petal_length_bin)))
petal_length_cat[:4]

##? 41. How to create a new column from existing columns of a numpy array?
# Difficulty Level: L2
# Q. Create a new column for volume in iris_2d,
# where volume is (pi x petallength x sepal_length^2)/3
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='object')
# Compute volume
sepallength = iris_2d[:, 0].astype('float')
petallength = iris_2d[:, 2].astype('float')
volume = (np.pi * petallength * sepallength**2) / 3
# Introduce new dimension to match iris_2d's
volume = volume[:, np.newaxis]
# Add the new column
out = np.hstack([iris_2d, volume])
out[:4]

##? 42. How to do probabilistic sampling in numpy?
# Difficulty Level: L3
# Q. Randomly sample iris's species such that setosa
# is twice the number of versicolor and virginica
# Import iris keeping the text column intact
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
# Get species column
species = iris[:, 4]
# 1 Generate Probablistically.
np.random.seed(100)
a = np.array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
out = np.random.choice(a, 150, p=[0.5, 0.25, 0.25])
# Checking counts
np.unique(out[:], return_counts=True)
# 2 Probablistic Sampling
# preferred
np.random.seed(100)
# FIX(review): the source had '.0750' for the middle segment's endpoint, which
# makes probs non-monotonic and breaks np.searchsorted; the intended value is .750.
probs = np.r_[np.linspace(0, 0.500, num=50),
              np.linspace(0.501, .750, num=50),
              np.linspace(.751, 1.0, num=50)]
index = np.searchsorted(probs, np.random.random(150))
species_out = species[index]
print(np.unique(species_out, return_counts=True))
# Approach 2 is preferred because it creates an index variable that can be
# used to sample 2d tabular data.

##? 43. How to get the second largest value of an array when grouped by another array?
# Difficulty Level: L2
# Q. What is the value of second longest petallength of species setosa
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
petal_setosa = iris[iris[:, 4] == b'Iris-setosa', [2]].astype('float')
# 1
# Note. Option 1 will return the second largest value 1.7, but with no repeats (np.unique()
np.unique(np.sort(petal_setosa))[-2]
# Note, options 2 and 3. these will return 1.9 because that is the second largest value.
# 2
petal_setosa[np.argpartition(petal_setosa, -2)[-2]]
# 3
petal_setosa[petal_setosa.argsort()[-2]]
# 4
unq = np.unique(petal_setosa)
unq[np.argpartition(unq, -2)[-2]]
# Note: This method still gives back 1.9. As that is the 2nd largest value,
# So you'd have to filter for unique values. Then do the argpart on the unq array

##? 44. How to sort a 2D array by a column
# Difficulty Level: L2
# Q. Sort the iris dataset based on sepallength column.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
# dtype = [('sepallength', float), ('sepalwidth', float), ('petallength', float), ('petalwidth', float),('species', 'S10')]
iris = np.genfromtxt(url, delimiter=',', dtype="object")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
# 1
print(iris[iris[:, 0].argsort()][:20])
# 2
#! Only captures first column to sort
np.sort(iris[:, 0], axis=0)
# 3
sorted(iris, key=lambda x: x[0])

##? 45. How to find the most frequent value in a numpy array?
# Difficulty Level: L1
# Q. Find the most frequent value of petal length (3rd column) in iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
vals, counts = np.unique(iris[:, 2], return_counts=True)
print(vals[np.argmax(counts)])

##? 46. How to find the position of the first occurrence of a value greater than a given value?
# Difficulty Level: L2
# Q. Find the position of the first occurrence of a value greater than 1.0 in petalwidth 4th column of iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
# 1
np.argwhere(iris[:, 3].astype(float) > 1.0)[0]

##? 47. How to replace all values greater than a given value to a given cutoff?
# Difficulty Level: L2
# Q. From the array a, replace all values greater than 30 to 30 and less than 10 to 10.
np.set_printoptions(precision=2)
np.random.seed(100)
a = np.random.uniform(1, 50, 20)
# 1
np.clip(a, a_min=10, a_max=30)
# 2
np.where(a < 10, 10, np.where(a > 30, 30, a))
# Tangent - Filtering condition
# Say we only want the values above 10 and below 30. Or operator | should help there.
filt_cond = (a < 10) | (a > 30)
a[filt_cond]

##? 48. How to get the positions of top n values from a numpy array?
# Difficulty Level: L2
# Q.
# Q. Get the positions of top 5 maximum values in a given array a.
np.random.seed(100)
a = np.random.uniform(1, 50, 20)
# 1
# NOTE(review): argsort()[:5] gives the positions of the 5 *smallest* values;
# the positions of the top-5 maxima are argsort()[-5:] (as used below).
a.argsort()[:5]
# 2
np.argpartition(-a, 5)[:5]
# or (order is reversed though)
np.argpartition(a, -5)[-5:]

# To get the values.
# 1
a[a.argsort()][-5:]
# 2
np.sort(a)[-5:]
# 3
np.partition(a, kth=-5)[-5:]
# 4
a[np.argpartition(-a, 5)][:5]
# or
a[np.argpartition(a, -5)][-5:]

##? 49. How to compute the row wise counts of all possible values in an array?
# Difficulty Level: L4
# Q. Compute the counts of unique values row-wise.
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
# Add a column of the counts of each row
# Tangent fun
# NOTE(review): the original source was truncated here
# ("counts = np.array([ np.unique(row) ..."); reconstructed as a per-row
# (values, counts) table via np.unique — verify against the intended output.
counts = [np.unique(row, return_counts=True) for row in arr]
import numpy as np
import matplotlib.pyplot as plt
import math
import os
import os.path as op
from datetime import datetime
from isstools.conversions import xray
from subprocess import call
import re
import collections
import pandas as pd
import h5py
from pathlib import Path


class XASdata:
    """Base loader for XAS pizza-box data files and databroker streams.

    NOTE(review): the original source was whitespace-mangled; the code tokens
    below are preserved verbatim and only re-wrapped/commented.
    """

    def __init__(self, db = None, **kwargs):
        self.energy = np.array([])
        self.data = np.array([])
        self.encoder_file = ''
        self.i0_file = ''
        self.it_file = ''
        self.ir_file = ''
        self.iff_file = ''
        # XASDataManager is defined elsewhere in isstools (not visible here).
        self.data_manager = XASDataManager()
        self.header_read = ''
        self.db = db

    def loadADCtrace(self, filename = '', filepath = '/GPFS/xf08id/pizza_box_data/'):
        """Read an ADC trace file; returns a DataFrame view (timestamp, adc, counter)
        or -1 when the file does not exist."""
        keys = ['times', 'timens', 'counter', 'adc']
        if os.path.isfile('{}{}'.format(filepath, filename)):
            df = pd.read_table('{}{}'.format(filepath, filename), delim_whitespace=True, comment='#', names=keys, index_col=False)
            # Combine seconds + nanoseconds into one float timestamp column.
            df['timestamp'] = df['times'] + 1e-9 * df['timens']
            #del df['times']
            #del df['timens']
            # Decode the hex ADC word: drop the low byte, two's-complement the
            # 18-bit value, then scale (7.62939453125e-05 = 5/65536 — presumably
            # counts-to-volts; TODO confirm against the DAQ spec).
            df['adc'] = df['adc'].apply(lambda x: (int(x, 16) >> 8) - 0x40000 if (int(x, 16) >> 8) > 0x1FFFF else int(x, 16) >> 8) * 7.62939453125e-05
            # Columns 4,3,2 -> timestamp, adc, counter.
            return df.iloc[:, 4:1:-1]
        else:
            return -1

    def loadADCtraceDB(self, uid, stream_name):
        """Same as loadADCtrace but sourced from databroker events (already ints)."""
        hdr = self.db[uid]
        dd = [_['data'] for _ in self.db.get_events(hdr, stream_name=stream_name, fill=True)]
        # Merge the per-event chunks into one array per key.
        result = {}
        for chunk in dd:
            for key in chunk.keys():
                if key in result:
                    result[key] = np.concatenate((result[key], chunk[key]))
                    continue
                result[key] = chunk[key]
        columns = list(dd[0][stream_name][0]._asdict().keys())
        df = pd.DataFrame(result[stream_name], columns=columns)
        df['timestamp'] = df['ts_s'] + 1e-9 * df['ts_ns']
        df['adc'] = df['adc'].apply(lambda x: (x >> 8) - 0x40000 if (x >> 8) > 0x1FFFF else x >> 8) * 7.62939453125e-05
        return df

    def loadENCtrace(self, filename = '', filepath = '/GPFS/xf08id/pizza_box_data/'):
        """Read an encoder trace file; returns (timestamp, encoder) or -1 if missing."""
        keys = ['times', 'timens', 'encoder', 'counter', 'di']
        if os.path.isfile('{}{}'.format(filepath, filename)):
            df = pd.read_table('{}{}'.format(filepath, filename), delim_whitespace=True, comment='#',
                               names=keys, index_col=False)
            df['timestamp'] = df['times'] + 1e-9 * df['timens']
            # NOTE(review): precedence makes this x ^ (0xffffff - 1) — presumably a
            # sign-decode of the 24-bit encoder word; confirm intent.
            df['encoder'] = df['encoder'].apply(lambda x: int(x) if int(x) <= 0 else -(int(x) ^ 0xffffff - 1))
            return df.iloc[:, [5, 2]]
        else:
            return -1

    def loadENCtraceDB(self, uid, stream_name):
        """Same as loadENCtrace but sourced from databroker events."""
        hdr = self.db[uid]
        dd = [_['data'] for _ in self.db.get_events(hdr, stream_name=stream_name, fill=True)]
        result = {}
        for chunk in dd:
            for key in chunk.keys():
                if key in result:
                    result[key] = np.concatenate((result[key], chunk[key]))
                    continue
                result[key] = chunk[key]
        columns = list(dd[0][stream_name][0]._asdict().keys())
        df = pd.DataFrame(result[stream_name], columns=columns)
        df['timestamp'] = df['ts_s'] + 1e-9 * df['ts_ns']
        df['encoder'] = df['encoder'].apply(lambda x: x if x <= 0 else -(x ^ 0xffffff - 1))
        return df.iloc[:, [5, 2]]

    def loadTRIGtrace(self, filename = '', filepath = '/GPFS/xf08id/pizza_box_data/'):
        """Read a trigger (digital input) trace; returns (timestamp, counter) rows,
        keeping every other sample, or -1 if the file is missing."""
        keys = ['times', 'timens', 'encoder', 'counter', 'di']
        if os.path.isfile('{}{}'.format(filepath, filename)):
            df = pd.read_table('{}{}'.format(filepath, filename), delim_whitespace=True, comment='#',
                               names=keys, index_col=False)
            df['timestamp'] = df['times'] + 1e-9 * df['timens']
            # Keep every second row (one edge per trigger period).
            df = df.iloc[::2]
            #df = df[df['counter'] % 2 == 0]
            return df.iloc[:, [5, 3]]
        else:
            return -1

    def loadTRIGtraceDB(self, uid, stream_name):
        """Same as loadTRIGtrace but sourced from databroker events."""
        hdr = self.db[uid]
        dd = [_['data'] for _ in self.db.get_events(hdr, stream_name=stream_name, fill=True)]
        result = {}
        for chunk in dd:
            for key in chunk.keys():
                if key in result:
                    result[key] = np.concatenate((result[key], chunk[key]))
                    continue
                result[key] = chunk[key]
        columns = list(dd[0][stream_name][0]._asdict().keys())
        df = pd.DataFrame(result[stream_name], columns=columns)
        df['timestamp'] = df['ts_s'] + 1e-9 * df['ts_ns']
        df = df.iloc[::2]
        return df.iloc[:, [5, 3]]

    def read_header(self, filename):
        """Return the leading '#' comment block of a text data file."""
        test = ''
        line = '#'
        with open(filename) as myfile:
            while line[0] == '#':
                line = next(myfile)
                test += line
        # Drop the first non-comment line that terminated the loop.
        return test[:-len(line)]


class XASdataGeneric(XASdata):
    """Generic XAS scan parser: loads raw per-device traces from files or
    databroker, converts the encoder channel to energy, and interpolates all
    channels onto a common time base."""

    def __init__(self,
                 pulses_per_deg, db = None, db_analysis = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.arrays = {}         # raw per-device DataFrames, keyed by device name
        self.interp_arrays = {}  # interpolated [timestamp, value] arrays
        self.db = db
        self.db_analysis = db_analysis
        self.pulses_per_deg = pulses_per_deg  # encoder pulses per degree (mono angle)
        #if self.db is None:
        #    print('The databroker was not passed as argument to the parser.\nSome features will be disabled.')
        self.uid = ''

    def process(self, uid):
        """Load the scan `uid` from files and interpolate it."""
        self.load(uid)
        self.uid = uid
        self.interpolate()

    def load(self, uid):
        """Populate self.arrays from the pizza-box files referenced by the run's
        descriptors, converting the encoder channel to an 'energy' column."""
        #if self.db is None:
        #    raise Exception('The databroker was not passed as argument to the parser. This feature is disabled.')
        self.arrays = {}
        self.interp_arrays = {}
        self.uid = uid
        has_encoder = False
        for i in self.db[uid]['descriptors']:
            if 'filename' in i['data_keys'][i['name']]:
                name = i['name']
                # The monochromator angle encoder may appear under either name.
                if name == 'pb9_enc1' or name == 'hhm_theta':
                    has_encoder = name
                if 'devname' in i['data_keys'][i['name']]:
                    name = i['data_keys'][i['name']]['devname']
                    if name == 'hhm_theta':
                        has_encoder = name
                if i['data_keys'][i['name']]['source'] == 'pizzabox-di-file':
                    data = self.loadTRIGtrace(i['data_keys'][i['name']]['filename'], '')
                if i['data_keys'][i['name']]['source'] == 'pizzabox-adc-file':
                    data = self.loadADCtrace(i['data_keys'][i['name']]['filename'], '')
                    # Apply the per-channel baseline offset recorded at scan start.
                    if i['name'] + ' offset' in self.db[uid]['start'] and type(data) == pd.core.frame.DataFrame:
                        data.iloc[:, 1] = data.iloc[:, 1] - self.db[uid]['start'][i['name'] + ' offset']
                if i['data_keys'][i['name']]['source'] == 'pizzabox-enc-file':
                    data = self.loadENCtrace(i['data_keys'][i['name']]['filename'], '')
                #if type(data) == np.ndarray:
                self.arrays[name] = data
        if has_encoder is not False:
            # Replace the raw encoder channel with an energy channel.
            energy = self.arrays.get(has_encoder).copy()
            if 'angle_offset' in self.db[uid]['start']:
                energy.iloc[:, 1] = xray.encoder2energy(energy.iloc[:, 1], self.pulses_per_deg, -float(self.db[uid]['start']['angle_offset']))
            energy.columns = ['timestamp', 'energy']
            del self.arrays[has_encoder]
            self.arrays['energy'] = energy

    def loadDB(self, uid):
        """Same as load() but pulls the traces from databroker streams."""
        # if self.db is None:
        #     raise Exception('The databroker was not passed as argument to the parser. This feature is disabled.')
        self.arrays = {}
        self.interp_arrays = {}
        self.uid = uid
        has_encoder = False
        for i in self.db[uid]['descriptors']:
            stream_name = i['name']
            name = i['name']
            if name == 'pb9_enc1' or name == 'hhm_theta':
                has_encoder = name
            if 'devname' in i['data_keys'][i['name']]:
                name = i['data_keys'][i['name']]['devname']
                if name == 'hhm_theta':
                    has_encoder = name
            if i['data_keys'][i['name']]['source'] == 'pizzabox-di-file':
                data = self.loadTRIGtraceDB(uid, stream_name)
            if i['data_keys'][i['name']]['source'] == 'pizzabox-adc-file':
                data = self.loadADCtraceDB(uid, stream_name)
                if i['name'] + ' offset' in self.db[uid]['start'] and type(data) == pd.core.frame.DataFrame:
                    data.iloc[:, 1] = data.iloc[:, 1] - self.db[uid]['start'][i['name'] + ' offset']
            if i['data_keys'][i['name']]['source'] == 'pizzabox-enc-file':
                data = self.loadENCtraceDB(uid, stream_name)
            # if type(data) == np.ndarray:
            self.arrays[name] = data
        if has_encoder is not False:
            energy = self.arrays.get(has_encoder).copy()
            if 'angle_offset' in self.db[uid]['start']:
                energy.iloc[:, 1] = xray.encoder2energy(energy.iloc[:, 1], self.pulses_per_deg, -float(self.db[uid]['start']['angle_offset']))
            energy.columns = ['timestamp', 'energy']
            del self.arrays[has_encoder]
            self.arrays['energy'] = energy

    def read_header(self, filename):
        """Return the header of a .txt/.dat ('#' comment block) or .hdf5 file
        (attributes rendered as '#'-prefixed lines)."""
        if filename[-3:] == 'txt' or filename[-3:] == 'dat':
            test = ''
            line = '#'
            with open(filename) as myfile:
                while line[0] == '#':
                    line = next(myfile)
                    test += line
            return test[:-len(line)]
        elif filename[-4:] == 'hdf5':
            f = h5py.File(filename, mode='r')
            header = dict(f.attrs)
            f.close()
            del header['start_time']
            del header['stop_time']
            del header['name']
            header = '\n'.join([f'# {key}: {header[key]}' for key in header]) + '\n#\n#'
            return header

    def loadInterpFile(self, filename):
        """Load an already-interpolated text file into interp_df/interp_arrays."""
        self.arrays = {}
        self.interp_arrays = {}
        if not op.exists(filename):
            raise IOError(f'The requested file (unknown) does not exist.')
        header = self.read_header(filename)
        # UID is embedded in the header; slice it out of the 'UID' line.
        self.uid = header[header.find('UID') + 5: header.find('\n', header.find('UID'))]
        # The last '#' line of the header holds the column names.
        keys = header[header.rfind('#'):][1:-1].split()
        timestamp_index = -1
        if 'Timestamp (s)' in keys:
            timestamp_index = keys.index('Timestamp (s)')
        elif 'timestamp' in keys:
            timestamp_index = keys.index('timestamp')
        df = pd.read_table(filename, delim_whitespace=True, comment='#', names=keys, index_col=False).sort_values(keys[1])
        # Constant '1' column used as a normalization placeholder downstream.
        df['1'] = pd.Series(np.ones(len(df.iloc[:, 0])), index=df.index)
        self.interp_df = df
        for index, key in enumerate(df.keys()):
            if index != timestamp_index:
                self.interp_arrays[key] = np.array([df.iloc[:, timestamp_index].values, df.iloc[:, index]]).transpose()
        self.interp_arrays['1'] = np.array([df.iloc[:, timestamp_index].values, np.ones(len(df.iloc[:, 0]))]).transpose()

    def loadInterpFileHDF5(self, filename):
        """Load an already-interpolated HDF5 file into interp_df/interp_arrays."""
        self.arrays = {}
        self.interp_arrays = {}
        if not op.exists(filename):
            raise IOError(f'The requested file (unknown) does not exist.')
        # opening a file:
        f = h5py.File(filename, mode='r')
        df = pd.DataFrame({key: value for key, value in zip(f.keys(), f.values())})
        self.interp_df = df
        self.md = dict(f.attrs)
        self.uid = self.md['real_uid']
        #for attr in f.attrs:
        #    print(attr, f.attrs[attr])
        keys = list(f.keys())
        if 'timestamp' in keys:
            timestamp_index = keys.index('timestamp')
        for index, key in enumerate(df.keys()):
            if index != timestamp_index:
                self.interp_arrays[key] = np.array([df.iloc[:, timestamp_index].values, df.iloc[:, index]]).transpose()
        self.interp_arrays['1'] = np.array([df.iloc[:, timestamp_index].values, np.ones(len(df.iloc[:, 0]))]).transpose()
        f.close()

    def loadInterpFromDB(self, uid):
        """Load interpolated data for `uid` from the analysis databroker."""
        if self.db_analysis is None:
            raise IOError('No db_analysis was passed to the parser in the initialization')
        hdr = self.db_analysis[uid]
        dd = [_['data'] for _ in self.db_analysis.get_events(hdr, stream_name='interpolated', fill=True)]
        result = {}
        for chunk in [chunk['interpolated'] for chunk in dd]:
            for key in chunk.keys():
                if key in result:
                    result[key] = np.concatenate((result[key], chunk[key]))
                    continue
                result[key] = chunk[key]
        df = pd.DataFrame(result)
        # Normalize the legacy 'Ones' column name to '1'.
        if 'Ones' in df.columns:
            new_keys = list(df.columns)
            new_keys[new_keys.index('Ones')] = '1'
            df.columns = new_keys
        self.interp_df = df
        keys = list(df.keys())
        if 'timestamp' in keys:
            timestamp_index = keys.index('timestamp')
        for index, key in enumerate(df.keys()):
            if index != timestamp_index:
                self.interp_arrays[key] = np.array([df.iloc[:, timestamp_index].values, df.iloc[:, index]]).transpose()
        self.interp_arrays['1'] = np.array([df.iloc[:, timestamp_index].values, np.ones(len(df.iloc[:, 0]))]).transpose()

    def interpolate(self, key_base = 'i0'):
        """Interpolate all loaded channels onto the time base of `key_base`."""
        # Common overlap window across all channels (channels with <=5 samples
        # are excluded from the max-timestamp bound).
        min_timestamp = max([self.arrays.get(key).iloc[0, 0] for key in self.arrays])
        max_timestamp = min([self.arrays.get(key).iloc[len(self.arrays.get(key)) - 1, 0] for key in self.arrays if len(self.arrays.get(key).iloc[:, 0]) > 5])
        try:
            if key_base not in self.arrays.keys():
                raise ValueError('Could not find "{}" in the loaded scan. Pick another key_base for the interpolation.'.format(key_base))
        except ValueError as err:
            print(err.args[0], '\nAborted...')
            return
        timestamps = self.arrays[key_base].iloc[:,0]
        # Trim base timestamps to the common overlap window.
        condition = timestamps < min_timestamp
        timestamps = timestamps[np.sum(condition):]
        condition = timestamps > max_timestamp
        timestamps = timestamps[: len(timestamps) - np.sum(condition)]
        #time = [np.mean(array) for array in np.array_split(self.arrays[key_base][:,0], len(timestamps))]
        for key in self.arrays.keys():
            if len(self.arrays.get(key).iloc[:, 0]) > 5 * len(timestamps):
                # NOTE(review): the source is truncated mid-statement below.
                time = [
np.mean(array)
numpy.unique
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.

.. moduleauthor:: <NAME> <<EMAIL>>
"""

#-----------------------------------------------------------------------------
#       Copyright (C) 2013 The Mosaic Development Team
#
#  Distributed under the terms of the BSD License. The full license is in
#  the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where rot
                                is the rotation matrix and tn/td are the
                                numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Pre-transposed rotation matrices, used for Miller-index transforms.
        self.transposed_rotations = N.array([N.transpose(t[0])
                                             for t in transformations])
        # Per-component phase factors exp(-2*pi*i*tn/td); they are combined
        # per-reflection in symmetryEquivalentMillerIndices.
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
                                            for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first
                 element). phase_factor contains the phase factors that
                 must be applied to the structure factor of reflection
                 hkl to obtain the structure factor of the symmetry
                 equivalent reflection.
        :rtype: tuple
        """
        hkls = N.dot(self.transposed_rotations, hkl)
        # Product over the three components of phase_factors**hkl gives
        # exp(-2*pi*i * t.hkl) for each transformation's translation t.
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p


space_groups = {}

# Space group 1: P 1 (identity only).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg

# Space group 2: P -1 (identity + inversion).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg

# Space group 3: P 1 2 1.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg

# Space group 4: P 1 21 1 (screw axis: translation (0,1/2,0)).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg

# Space group 5: C 1 2 1 (C-centering adds the (1/2,1/2,0) translations).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg

# Space group 6: P 1 m 1 (mirror).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg

# Space group 7: P 1 c 1 (glide plane: translation (0,0,1/2)).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg

# Space group 8 (entry truncated in the source — see note below).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
# NOTE(review): the source is truncated mid-statement here.
rot.shape = (3,
3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 
1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) 
trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num 
= N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(29, 'P c a 21', transformations) space_groups[29] = sg space_groups['P c a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(30, 'P n c 2', transformations) space_groups[30] = sg space_groups['P n c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(31, 'P m n 21', transformations) space_groups[31] = sg space_groups['P m n 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(32, 'P b a 2', transformations) space_groups[32] = sg space_groups['P b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(33, 'P n a 21', transformations) space_groups[33] = sg space_groups['P n a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(34, 'P n n 2', transformations) space_groups[34] = sg space_groups['P n n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(35, 'C m m 2', transformations) 
space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(38, 'A m m 2', transformations) space_groups[38] = sg space_groups['A m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(39, 'A b m 2', transformations) space_groups[39] = sg space_groups['A b m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(40, 'A m a 2', transformations) space_groups[40] = sg space_groups['A m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(41, 'A b a 2', transformations) space_groups[41] = sg space_groups['A b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(42, 'F m m 2', transformations) space_groups[42] = sg space_groups['F m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(43, 'F d d 2', transformations) space_groups[43] = sg space_groups['F d d 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(44, 'I m m 2', transformations) space_groups[44] = sg space_groups['I m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(45, 'I b a 2', transformations) space_groups[45] = sg space_groups['I b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(46, 'I m a 2', transformations) space_groups[46] = sg space_groups['I m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(47, 'P m m m', transformations) space_groups[47] 
= sg space_groups['P m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(48, 'P n n n :2', transformations) space_groups[48] = sg space_groups['P n n n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(49, 'P c c m', transformations) space_groups[49] = sg space_groups['P c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(50, 'P b a n :2', transformations) space_groups[50] = sg space_groups['P b a n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(51, 'P m m a', transformations) space_groups[51] = sg space_groups['P m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(52, 'P n n a', transformations) space_groups[52] = sg space_groups['P n n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(53, 'P m n a', transformations) space_groups[53] = sg space_groups['P m n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 
3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(54, 'P c c a', transformations) space_groups[54] = sg space_groups['P c c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(55, 'P b a m', transformations) space_groups[55] = sg space_groups['P b a m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(56, 'P c c n', transformations) space_groups[56] = sg space_groups['P c c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(57, 'P b c m', transformations) space_groups[57] = sg space_groups['P b c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(58, 'P n n m', transformations) space_groups[58] = sg space_groups['P 
n n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(59, 'P m m n :2', transformations) space_groups[59] = sg space_groups['P m m n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(60, 'P b c n', transformations) space_groups[60] = sg space_groups['P b c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(61, 'P b c a', transformations) space_groups[61] = sg space_groups['P b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(62, 'P n m a', transformations) space_groups[62] = sg space_groups['P n m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(63, 'C m c m', transformations) space_groups[63] = sg space_groups['C m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(64, 'C m c a', transformations) space_groups[64] = sg space_groups['C m c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(65, 'C m m m', transformations) space_groups[65] = sg space_groups['C m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(66, 'C c c m', transformations) space_groups[66] = sg space_groups['C c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(67, 'C m m a', transformations) space_groups[67] = sg space_groups['C m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(68, 'C c c a :2', transformations) space_groups[68] = sg space_groups['C c c a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = 
N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = 
N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(69, 'F m m m', transformations) space_groups[69] = sg space_groups['F m m m'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(70, 'F d d d :2', transformations) space_groups[70] = sg space_groups['F d d d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(71, 'I m m m', transformations) space_groups[71] = sg space_groups['I m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) 
# --- Tail of space group 72 (I b a m). ---
# The `transformations` list and the current `rot` matrix were set up on an
# earlier source line; finish the entry in progress, then add the remaining
# (rotation, translation-numerator, translation-denominator) operations.
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
for _r, _tn, _td in [
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg

# Space group 73 (I b c a).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0],  [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],  [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,1,1],  [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,0,1],  [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg

# Space group 74 (I m m a).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],  [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0],  [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],  [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1],  [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,0,1],  [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,0,1],  [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg

# Space group 75 (P 4).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg

# Space group 76 (P 41).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],  [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1],  [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg

# Space group 77 (P 42).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg

# Space group 78 (P 43).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],  [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg

# Space group 79 (I 4).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],  [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg

# Space group 80 (I 41).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],  [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1],  [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],  [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1],  [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg

# Space group 81 (P -4).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg

# --- Head of space group 82 (I -4). ---
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
# The next entry's append happens on the following source line; leave its
# components assigned exactly as the original does here.
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
# --- Continuation of space group 82 (I -4). ---
# `rot`, `trans_num` and `trans_den` were assigned at the end of the previous
# source line; complete that entry first.
transformations.append((rot, trans_num, trans_den))
# Remaining operations of group 82: the same rotations shifted by (1/2,1/2,1/2).
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg

# Space group 83 (P 4/m).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg

# Space group 84 (P 42/m).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg

# Space group 85 (P 4/n :2).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,0],   [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,0],   [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,0],  [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,0],  [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg

# --- Head of space group 86 (P 42/n :2); the list is completed on the
# following source lines. ---
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
# --- Tail of space group 86 (P 42/n :2): the `transformations` list was
# opened on an earlier source line; add the remaining operations. ---
for _r, _tn, _td in [
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],  [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg

# Space group 87 (I 4/m).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg

# Space group 88 (I 41/a :2).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,3,3],    [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1],    [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,-3,-3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1],  [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,-1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],    [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [3,5,5],    [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [3,3,3],    [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,-1,-1],  [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,1],    [4,4,4]),
    ([1,0,0,0,1,0,0,0,-1],   [1,0,1],    [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg

# Space group 89 (P 4 2 2).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg

# Space group 90 (P 4 21 2).
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [1,1,0], [2,2,1]),
    ([0,1,0,-1,0,0,0,0,1],    [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg

# --- Head of space group 91 (P 41 2 2). ---
transformations = []
for _r, _tn, _td in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,3], [1,1,4]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
# The next entry's translation and append continue on the following source
# line; keep `rot` assigned exactly as the original leaves it here.
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg 
# --- space group 93: P 42 2 2 ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg

# --- space group 94: P 42 21 2 (operations continue on the next chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
# --- continuation of space group 94 (P 42 21 2) operations ---
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg

# --- space group 95: P 43 2 2 (operations continue on the next chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
# --- continuation of space group 95 (P 43 2 2) operations ---
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg

# --- space group 96: P 43 21 2 ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg

# --- space group 97: I 4 2 2 ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg

# --- space group 98: I 41 2 2 ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg

# --- space group 99: P 4 m m (operations continue on the next chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
# --- continuation of space group 99 (P 4 m m) operations ---
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg

# --- space group 100: P 4 b m ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg

# --- space group 101: P 42 c m ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg

# --- space group 102: P 42 n m ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg

# --- space group 103: P 4 c c ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg

# --- space group 104: P 4 n c (registered on the next chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
# --- registration of space group 104 (P 4 n c) ---
space_groups[104] = sg
space_groups['P 4 n c'] = sg

# --- space group 105: P 42 m c ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg

# --- space group 106: P 42 b c ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg

# --- space group 107: I 4 m m ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg

# --- space group 108: I 4 c m (operations continue on the next chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
# --- continuation of space group 108 (I 4 c m) operations ---
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg

# --- space group 109: I 41 m d ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg

# --- space group 110: I 41 c d (registration completes on the next chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(111, 'P -4 2 m', transformations) space_groups[111] = sg space_groups['P -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(112, 'P -4 2 c', transformations) space_groups[112] = sg space_groups['P -4 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(113, 'P -4 21 m', transformations) space_groups[113] = sg space_groups['P -4 21 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(114, 'P -4 21 c', transformations) space_groups[114] = sg space_groups['P -4 21 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(115, 'P -4 m 2', transformations) space_groups[115] = sg space_groups['P -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(116, 'P -4 c 2', transformations) space_groups[116] = sg space_groups['P -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(117, 'P -4 b 2', transformations) space_groups[117] = sg space_groups['P -4 b 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(118, 'P -4 n 2', transformations) space_groups[118] = sg space_groups['P -4 n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(119, 'I -4 m 2', transformations) space_groups[119] = sg space_groups['I -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(120, 'I -4 c 2', transformations) space_groups[120] = sg space_groups['I -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(121, 'I -4 2 m', transformations) space_groups[121] = sg space_groups['I -4 2 m'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(122, 'I -4 2 d', transformations) space_groups[122] = sg space_groups['I -4 2 d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(123, 'P 4/m m m', transformations) space_groups[123] = sg space_groups['P 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) 
trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(124, 'P 4/m c c', transformations) space_groups[124] = sg space_groups['P 4/m c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) 
# --- Machine-generated crystallographic space-group table (continuation). ---
# Each stanza builds `transformations`, a list of symmetry operations stored as
# (rot, trans_num, trans_den) triples: a 3x3 integer rotation matrix plus a
# fractional translation given as integer numerator/denominator vectors.
# Completed stanzas are registered via SpaceGroup(number, symbol, transformations)
# into `space_groups` under BOTH the International Tables number and the
# Hermann-Mauguin symbol (this chunk completes/registers groups 125 through 138).
# NOTE(review): this chunk begins and ends mid-definition (group 125's list starts
# before this view; the list begun after group 138 is truncated at the end), and
# the original physical line breaks fall mid-statement -- the generated code
# below is therefore kept byte-identical, with documentation added only here.
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(125, 'P 4/n b m :2', transformations) space_groups[125] = sg space_groups['P 4/n b m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(126, 'P 4/n n c :2', transformations) space_groups[126] = sg space_groups['P 4/n n c :2'] = sg transformations = [] rot = 
N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(127, 'P 4/m b m', transformations) space_groups[127] = sg space_groups['P 4/m b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(128, 'P 4/m n c', transformations) space_groups[128] = sg space_groups['P 4/m n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = 
N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = 
N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(129, 'P 4/n m m :2', transformations) space_groups[129] = sg space_groups['P 4/n m m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(130, 'P 4/n c c :2', transformations) space_groups[130] = sg space_groups['P 4/n c c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(131, 'P 42/m m c', transformations) space_groups[131] = sg space_groups['P 42/m m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(132, 'P 42/m c m', transformations) space_groups[132] = sg space_groups['P 42/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(133, 'P 42/n b c :2', transformations) space_groups[133] = sg space_groups['P 42/n b c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(134, 'P 42/n n m :2', transformations) space_groups[134] = sg space_groups['P 42/n n m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(135, 'P 42/m b c', transformations) space_groups[135] = sg space_groups['P 42/m b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(136, 'P 42/m n m', transformations) space_groups[136] = sg space_groups['P 42/m n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(137, 'P 42/n m c :2', transformations) space_groups[137] = sg space_groups['P 42/n m c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = 
N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(138, 'P 42/n c m :2', transformations) space_groups[138] = sg space_groups['P 42/n c m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(139, 'I 4/m m m', transformations) space_groups[139] = sg space_groups['I 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(140, 'I 4/m c m', transformations) space_groups[140] = sg space_groups['I 4/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(141, 'I 41/a m d :2', transformations) space_groups[141] = sg space_groups['I 41/a m d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(142, 'I 41/a c d :2', transformations) space_groups[142] = sg space_groups['I 41/a c d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(143, 'P 3', transformations) space_groups[143] = sg space_groups['P 3'] = sg transformations = 
[] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(144, 'P 31', transformations) space_groups[144] = sg space_groups['P 31'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(145, 'P 32', transformations) space_groups[145] = sg space_groups['P 32'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = 
N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(146, 'R 3 :H', transformations) space_groups[146] = sg space_groups['R 3 :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(147, 'P -3', transformations) space_groups[147] = sg space_groups['P -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape 
= (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(148, 'R -3 :H', transformations) space_groups[148] = sg space_groups['R -3 :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1])  # completes the 'trans_den =' assignment begun on the previous line
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
# Machine-generated space-group table. For each group: build the list of
# symmetry operations as (rotation matrix, translation numerator vector,
# translation denominator vector) triples, then register the SpaceGroup
# object under both its International Tables number and its
# Hermann-Mauguin symbol. Do not edit the numeric data by hand.
# Space group 150 (P 3 2 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
# Space group 151 (P 31 1 2) - screw axis: 1/3 and 2/3 c-translations below
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
# Space group 152 (P 31 2 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
# Space group 153 (P 32 1 2)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
# Space group 154 (P 32 2 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
# Space group 155 (R 3 2 :H) - rhombohedral group in the hexagonal setting:
# the six point operations are repeated for the three lattice centerings
# (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Space group 156 (P 3 m 1); its registration follows on the next lines.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
# Register space group 156 (P 3 m 1), built on the preceding lines.
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Space group 157 (P 3 1 m)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Space group 158 (P 3 c 1) - glide plane: c/2 translations on the mirrors
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Space group 159 (P 3 1 c)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Space group 160 (R 3 m :H) - rhombohedral, hexagonal setting: six point
# operations repeated for centerings (0,0,0), (1/3,2/3,2/3), (2/3,1/3,1/3).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161 (R 3 c :H) - rhombohedral with c-glide: glide translations
# combine with the three lattice centerings (note the 7/6 and 5/6 entries).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162 (P -3 1 m) - centrosymmetric: 12 operations
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163 (P -3 1 c)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164 (P -3 m 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165 (P -3 c 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166 (R -3 m :H) - rhombohedral, hexagonal setting: the 12
# point operations repeated for centerings (0,0,0), (1/3,2/3,2/3),
# (2/3,1/3,1/3) -> 36 operations in total.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167 (R -3 c :H) - rhombohedral with c-glide: glide
# translations combine with the three centerings (7/6, 1/6, 5/6, -1/6
# entries below are centering + glide components).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168 (P 6)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169 (P 61) - 6_1 screw axis, 1/6 c-translation steps;
# this section is completed on the following line of the file.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num,
trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(169, 'P 61', transformations) space_groups[169] = sg space_groups['P 61'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(170, 'P 65', transformations) space_groups[170] = sg space_groups['P 65'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = 
N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(171, 'P 62', transformations) space_groups[171] = sg space_groups['P 62'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(172, 'P 64', transformations) space_groups[172] = sg space_groups['P 64'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(173, 'P 63', transformations) space_groups[173] = sg space_groups['P 63'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(174, 'P -6', transformations) space_groups[174] = sg space_groups['P -6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(175, 'P 6/m', transformations) space_groups[175] = sg space_groups['P 6/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(176, 'P 63/m', transformations) space_groups[176] = sg space_groups['P 63/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(177, 'P 6 2 2', transformations) space_groups[177] = sg space_groups['P 6 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(178, 'P 61 2 2', transformations) space_groups[178] = sg space_groups['P 61 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(179, 'P 65 2 2', transformations) space_groups[179] = sg space_groups['P 65 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(180, 'P 62 2 2', transformations) space_groups[180] = sg space_groups['P 62 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) 
trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(181, 'P 64 2 2', transformations) space_groups[181] = sg space_groups['P 64 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(182, 'P 63 2 2', transformations) space_groups[182] = sg space_groups['P 63 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(183, 'P 6 m m', transformations) space_groups[183] = sg space_groups['P 6 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(184, 'P 6 c c', transformations) space_groups[184] = sg space_groups['P 6 c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(185, 'P 63 c m', transformations) space_groups[185] = sg space_groups['P 63 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(186, 'P 63 m c', transformations) space_groups[186] = sg space_groups['P 63 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(187, 'P -6 m 2', transformations) space_groups[187] = sg space_groups['P -6 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = 
N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(188, 'P -6 c 2', transformations) space_groups[188] = sg space_groups['P -6 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(189, 'P -6 2 m', transformations) space_groups[189] = sg space_groups['P -6 2 m'] = sg transformations = [] rot = 
N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = 
SpaceGroup(190, 'P -6 2 c', transformations) space_groups[190] = sg space_groups['P -6 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(191, 'P 6/m m m', transformations) space_groups[191] = sg space_groups['P 6/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(192, 'P 6/m c c', transformations) space_groups[192] = sg space_groups['P 6/m c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(193, 'P 63/m c m', transformations) space_groups[193] = sg space_groups['P 63/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(194, 'P 63/m m c', transformations) space_groups[194] = sg space_groups['P 63/m m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(195, 'P 2 3', transformations) space_groups[195] = sg space_groups['P 2 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot =
N.array([0,1,0,0,0,1,1,0,0])
numpy.array
"""Tests for the atmos_flux_inversion package. Includes tests using random data, analytic solutions, and checks that different methods agree for simple problems. """ from __future__ import print_function, division import fractions import itertools import operator import os.path import atexit import pickle import math import sys try: from functools import reduce except ImportError: # reduce used to be a builtin pass import numpy as np import numpy.linalg as np_la import numpy.linalg as la import numpy.testing as np_tst import scipy.linalg import scipy.sparse import scipy.optimize # Import from scipy.linalg if not using dask from scipy.linalg import cholesky from scipy.sparse.linalg.interface import LinearOperator, MatrixLinearOperator import unittest2 import pyfftw import pandas as pd import xarray try: import sparse HAVE_SPARSE = True except ImportError: HAVE_SPARSE = False import atmos_flux_inversion.optimal_interpolation import atmos_flux_inversion.correlations import atmos_flux_inversion.covariances import atmos_flux_inversion.variational import atmos_flux_inversion.remapper import atmos_flux_inversion.wrapper import atmos_flux_inversion.linalg import atmos_flux_inversion.noise import atmos_flux_inversion.psas import atmos_flux_inversion.util from atmos_flux_inversion.linalg import tolinearoperator if os.path.exists(".pyfftw.pickle"): with open(".pyfftw.pickle", "rb") as wis_in: WISDOM = pickle.load(wis_in) if isinstance(WISDOM[0], str): WISDOM = [wis.encode("ascii") for wis in WISDOM] pyfftw.import_wisdom(WISDOM) del WISDOM, wis_in def save_wisdom(): """Save accumulated pyfftw wisdom. Saves in hidden file in current directory. Should help speed up subsequent test runs. 
""" with open(".pyfftw.pickle", "wb") as wis_out: pickle.dump(pyfftw.export_wisdom(), wis_out, 2) atexit.register(save_wisdom) del save_wisdom # If adding other inexact methods to the list tested, be sure to add # those to the `if "var" in name or "psas" in name` and # `if "psas" in name` tests as applicable. ALL_METHODS = ( atmos_flux_inversion.optimal_interpolation.simple, atmos_flux_inversion.optimal_interpolation.fold_common, atmos_flux_inversion.optimal_interpolation.save_sum, atmos_flux_inversion.optimal_interpolation.scipy_chol, atmos_flux_inversion.variational.simple, atmos_flux_inversion.variational.incremental, atmos_flux_inversion.variational.incr_chol, atmos_flux_inversion.psas.simple, atmos_flux_inversion.psas.fold_common, ) ITERATIVE_METHOD_START = 4 """Where the iterative methods start in the above list. Used to test failure modes for these solvers. """ PRECISE_DTYPE = np.float128 """The dtype used to represent analytic results. These are initialized as :class:`fractions.Fraction` then converted to this dtype for the comparison. """ ITERATIVE_STATE_TOLERANCE = 1e-3 ITERATIVE_COVARIANCE_TOLERANCE = 1e-1 EXACT_TOLERANCE = 1e-7 DTYPE = np.float64 """Default dtype for certain tests.""" def getname(method): """Descriptive name for the function. A name combining the function name and module. Parameters ---------- method: callable Returns ------- name: str """ module = method.__module__ group = module.split(".")[-1] variant = method.__name__ return "{group:s} ({variant:s})".format(group=group, variant=variant) def expectFailureIf(condition): """Mark a test as XFAIL based on condition. Wrapper to make :func:`unittest2.expectedFailure` conditional. 
Parameters ---------- condition: bool Returns ------- decorator: func """ if condition: return unittest2.expectedFailure return lambda fun: fun class TestInversionSimple(unittest2.TestCase): """Test inversions using simple cases.""" def test_scalar_equal_variance(self): """Test a direct measurement of a scalar state.""" bg = np.atleast_1d(2.) bg_cov = np.atleast_2d(1.) obs = np.atleast_1d(3.) obs_cov = np.atleast_2d(1.) obs_op = np.atleast_2d(1.) for method in ALL_METHODS: name = getname(method) with self.subTest(method=name): post, post_cov = method( bg, bg_cov, obs, obs_cov, obs_op) np_tst.assert_allclose(post, 2.5) np_tst.assert_allclose(post_cov, .5) def test_scalar_unequal_variance(self): """Test assimilation of a direct measurement fo a scalar state. Variances not equal. """ bg = np.atleast_1d(15.) bg_cov = np.atleast_2d(2.) obs = np.atleast_1d(14.) obs_cov = np.atleast_2d(1.) obs_op = np.atleast_2d(1.) for method in ALL_METHODS: with self.subTest(method=getname(method)): post, post_cov = method( bg, bg_cov, obs, obs_cov, obs_op) np_tst.assert_allclose( post, PRECISE_DTYPE(14 + fractions.Fraction(1, 3))) np_tst.assert_allclose( post_cov, PRECISE_DTYPE(fractions.Fraction(2, 3))) def test_multiple_priors(self): """Test doing multiple assimilations at once. Simple test. """ bg = np.array([[2., 3.]]) bg_cov = np.atleast_2d(1.) obs = np.array([[3., 4.]]) obs_cov = np.atleast_2d(1.) obs_op = np.atleast_2d(1.) for method in ALL_METHODS[:ITERATIVE_METHOD_START]: name = getname(method) with self.subTest(method=name): post, post_cov = method( bg, bg_cov, obs, obs_cov, obs_op) np_tst.assert_allclose(post, [[2.5, 3.5]]) np_tst.assert_allclose(post_cov, .5) def test_homework_one(self): """Verify that this can reproduce the answers to HW1. Make sure the answers here are within roundoff of the analytic solutions. 
""" bg = np.array((18., 15., 22.)) bg_var = np.array((2., 2., 2.)) bg_corr = np.array(((1, .5, .25), (.5, 1, .5), (.25, .5, 1))) obs = np.array((19., 14.)) obs_var = np.array((1., 1.)) obs_op = np.array(((1., 0., 0.), (0., 1., 0.))) bg_std = np.sqrt(bg_var) bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std))) # obs_std = np.sqrt(obs_var) # Assume no correlations between observations. obs_cov = np.diag(obs_var) for method in ALL_METHODS: # Setup for expected degradation of solutions name = getname(method) # The default for assert_allclose cov_rtol = state_rtol = EXACT_TOLERANCE with self.subTest(method=name): # Also tested above in scalar_unequal_variance with self.subTest(problem=3): state_college_index = 1 post, post_cov = method( bg[state_college_index], bg_cov[state_college_index, state_college_index], obs[state_college_index], obs_cov[state_college_index, state_college_index], obs_op[state_college_index, state_college_index]) np_tst.assert_allclose( post, np.asanyarray(14 + fractions.Fraction(1, 3), dtype=PRECISE_DTYPE), rtol=state_rtol) np_tst.assert_allclose( post_cov, np.asanyarray(fractions.Fraction(2, 3), dtype=PRECISE_DTYPE), rtol=cov_rtol) with self.subTest(problem=4): state_college_index = 1 post, post_cov = method( bg, bg_cov, obs[state_college_index], obs_cov[state_college_index, state_college_index], obs_op[state_college_index, :]) np_tst.assert_allclose( post, np.asanyarray((17 + fractions.Fraction(2, 3), 14 + fractions.Fraction(1, 3), 21 + fractions.Fraction(2, 3)), dtype=PRECISE_DTYPE), rtol=state_rtol) with self.subTest(problem=5): pittsburgh_index = 0 post, post_cov = method( bg, bg_cov, obs[pittsburgh_index], obs_cov[pittsburgh_index, pittsburgh_index], obs_op[pittsburgh_index, :]) np_tst.assert_allclose( post, np.asanyarray((18 + fractions.Fraction(2, 3), 15 + fractions.Fraction(1, 3), 22 + fractions.Fraction(1, 6)), PRECISE_DTYPE), rtol=state_rtol) with self.subTest(problem=7): state_college_index = 1 post, post_cov = method( bg, 
bg_cov, obs[state_college_index], 4 * obs_cov[state_college_index, state_college_index], obs_op[state_college_index, :]) np_tst.assert_allclose( post, np.asanyarray((17 + fractions.Fraction(5, 6), 14 + fractions.Fraction(2, 3), 21 + fractions.Fraction(5, 6)), dtype=PRECISE_DTYPE), rtol=state_rtol) with self.subTest(problem=8): post, post_cov = method( bg, bg_cov, obs, obs_cov, obs_op) # background correlations make this problem not # strictly linear, at least without doing # sequential inversions. Have not verified by hand np_tst.assert_allclose( post, np.asanyarray( (18 + fractions.Fraction(1, 2), 14 + fractions.Fraction(1, 2), 21 + fractions.Fraction(3, 4)), dtype=PRECISE_DTYPE), rtol=state_rtol) def test_sequential_assimilations(self): """Make sure this follows Bayes' rule.""" bg = np.array((18., 15., 22.)) bg_var = np.array((2., 2., 2.)) bg_corr = np.array(((1, .5, .25), (.5, 1, .5), (.25, .5, 1))) obs = np.array((19., 14.)) obs_var = np.array((1., 1.)) obs_op = np.array(((1., 0., 0.), (0., 1., 0.))) bg_std = np.sqrt(bg_var) bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std))) # obs_std = np.sqrt(obs_var) # Assume no correlations between observations. obs_cov = np.diag(obs_var) for method in ALL_METHODS: name = getname(method) if "var" in name.lower() or "psas" in name.lower(): state_rtol = ITERATIVE_STATE_TOLERANCE cov_rtol = ITERATIVE_COVARIANCE_TOLERANCE else: # The default for assert_allclose cov_rtol = state_rtol = EXACT_TOLERANCE with self.subTest(method=name): inter1, inter_cov1 = method( bg, bg_cov, obs[0], obs_cov[0, 0], obs_op[0, :]) post1, post_cov1 = method( inter1, inter_cov1, obs[1], obs_cov[1, 1], obs_op[1, :]) post2, post_cov2 = method( bg, bg_cov, obs, obs_cov, obs_op) np_tst.assert_allclose( post1, post2, rtol=state_rtol) if "psas" in name.lower(): # The second covariance isn't positive definite (one # positive entry) and no entry shares the order of # magnitude between the two. 
raise unittest2.SkipTest("Known Failure: PSAS Covariances") np_tst.assert_allclose( post_cov1, post_cov2, rtol=cov_rtol) def test_iterative_failures(self): """Test failure modes of iterative solvers.""" bg_stds = np.logspace(-8, 1, 10) bg_corr = scipy.linalg.toeplitz( np.arange(1, .9, -.01)) bg_cov = np.diag(bg_stds).dot(bg_corr).dot(np.diag(bg_stds)) bg_vals = np.arange(10) obs_op = np.eye(3, 10) obs_vals = 10 - np.arange(3) obs_cov = np.diag((10, 1e-3, 1e-6)) / 8 for method in ALL_METHODS[ITERATIVE_METHOD_START:]: name = getname(method) with self.subTest(method=name): with self.assertRaises( atmos_flux_inversion.ConvergenceError) as cxt_mgr: method(bg_vals, bg_cov, obs_vals, obs_cov, obs_op) conv_err = cxt_mgr.exception self.assertTrue(hasattr(conv_err, "guess")) self.assertTrue(hasattr(conv_err, "result")) self.assertIsInstance(conv_err.result, scipy.optimize.OptimizeResult) self.assertTrue(hasattr(conv_err, "hess_inv")) class TestGaussianNoise(unittest2.TestCase): """Test the properties of the gaussian noise.""" def test_ident_cov(self): """Test generation with identity as covariance.""" sample_shape = 3 cov = np.eye(sample_shape) noise = atmos_flux_inversion.noise.gaussian_noise(cov, int(1e6)) np_tst.assert_allclose(noise.mean(axis=0), np.zeros((sample_shape,)), rtol=1e-2, atol=1e-2) np_tst.assert_allclose(np.cov(noise.T), cov, rtol=1e-2, atol=1e-2) def test_shape(self): """Make sure the returned shapes are correct.""" sample_shape = (3,) sample_cov = np.eye(sample_shape[0]) for shape in ((), (6,), (2, 3)): with self.subTest(shape=shape): res = atmos_flux_inversion.noise.gaussian_noise( sample_cov, shape) self.assertEqual(res.shape, shape + sample_shape) with self.subTest(shape=5): res = atmos_flux_inversion.noise.gaussian_noise( sample_cov, 5) self.assertEqual(res.shape, (5,) + sample_shape) with self.subTest(shape=None): res = atmos_flux_inversion.noise.gaussian_noise( sample_cov, None) self.assertEqual(res.shape, sample_shape) def test_operator(self): 
"""Test that the code works with operator covariances.""" diagonal = (1, .5, .3, .2, .1) sample_cov = atmos_flux_inversion.covariances.DiagonalOperator( diagonal) sample_shape = (len(diagonal),) noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6)) np_tst.assert_allclose(noise.mean(axis=0), np.zeros(sample_shape), rtol=1e-2, atol=1e-2) np_tst.assert_allclose(np.cov(noise.T), np.diag(diagonal), rtol=1e-2, atol=1e-2) def test_kron_op(self): """Test that large kronecker operators don't break the handling.""" op1 = scipy.linalg.toeplitz(.6 ** np.arange(15)) diag = (1, .9, .8, .7, .6, .5, .4, .3, .2, .1) op2 = atmos_flux_inversion.covariances.DiagonalOperator(diag) combined = atmos_flux_inversion.util.kronecker_product(op1, op2) noise = atmos_flux_inversion.noise.gaussian_noise(combined, int(1e5)) np_tst.assert_allclose(noise.mean(axis=0), np.zeros(combined.shape[0]), rtol=1.1e-2, atol=1.1e-2) np_tst.assert_allclose(np.cov(noise.T), scipy.linalg.kron(op1, np.diag(diag)), rtol=3e-2, atol=3e-2) def test_off_diagonal(self): """Test that the code works with off-diagonal elements.""" sample_cov = scipy.linalg.toeplitz((1, .5, .25, .125)) sample_shape = (4,) noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6)) np_tst.assert_allclose(noise.mean(axis=0), np.zeros(sample_shape), rtol=1e-2, atol=1e-2) np_tst.assert_allclose(np.cov(noise.T), sample_cov, rtol=1e-2, atol=1e-2) def test_slow_decay(self): """Test that the code handles slowly-decaying covariances.""" sample_cov = scipy.linalg.toeplitz(.8 ** np.arange(10)) sample_shape = (10,) noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6)) np_tst.assert_allclose(noise.mean(axis=0), np.zeros(sample_shape), rtol=1e-2, atol=1e-2) np_tst.assert_allclose(np.cov(noise.T), sample_cov, rtol=1e-2, atol=1e-2) def test_fails(self): """Test that construction fails on invalid input.""" self.assertRaises(ValueError, atmos_flux_inversion.noise.gaussian_noise, np.ones(10)) 
self.assertRaises(ValueError, atmos_flux_inversion.noise.gaussian_noise, np.eye(3, 2)) class TestCorrelations(unittest2.TestCase): """Test the generation of correlation matrices.""" def test_far_correl(self): """Test the correlation between points far apart. Should be zero. """ for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction .__subclasses__()): with self.subTest(corr_class=corr_class.__name__): corr_fun = corr_class(1e-8) corr = corr_fun(1e8) self.assertAlmostEqual(corr, 0) def test_near_correl(self): """Test 2D correlation between near points. Should be one. """ for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction .__subclasses__()): with self.subTest(corr_class=corr_class.__name__): corr_fun = corr_class(1e8) corr = corr_fun(1e-8) self.assertAlmostEqual(corr, 1) def test_2d_np_fromfunction(self): """Test that the structure works with np.fromfunction. This is how the integration tests will get background covariances, so this needs to work. """ test_size = (int(15), int(20)) for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction .__subclasses__()): with self.subTest(corr_class=getname(corr_class)): corr_fun = corr_class(2.) corr = np.fromfunction(corr_fun.correlation_from_index, shape=test_size * 2, dtype=float) corr_mat = corr.reshape((np.prod(test_size),) * 2) # test postitive definite try: chol_upper = cholesky(corr_mat) except la.LinAlgError: self.fail("corr_mat not positive definite") # test symmetry np_tst.assert_allclose(chol_upper.T.dot(chol_upper), corr_mat, rtol=1e-4, atol=1e-4) def test_2d_make_matrix(self): """Test make_matrix for 2D correlations. Checks against original value. This test is really slow. """ # 30x25 Gaussian 10 not close test_nx = 30 test_ny = 20 test_points = test_ny * test_nx # TODO: speed up for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. 
__subclasses__()): for dist in (1, 5, 10, 15): with self.subTest(corr_class=getname(corr_class), dist=dist): if ( corr_class == atmos_flux_inversion.correlations. GaussianCorrelation ): raise unittest2.SkipTest( "Gaussian({0:d}) correlations ill-conditioned". format(dist) ) corr_fun = corr_class(dist) corr_mat = atmos_flux_inversion.correlations.make_matrix( corr_fun, (test_ny, test_nx)) # Make sure diagonal elements are ones np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6) # check if it matches the original np_tst.assert_allclose( corr_mat, np.fromfunction( corr_fun.correlation_from_index, (test_ny, test_nx, test_ny, test_nx) ).reshape((test_points, test_points)), # rtol=1e-13: Gaussian 10 and 15 fail # atol=1e-15: Gaussian 1 and 5 fail rtol=1e-5, atol=1e-6) # check if it actually is positive definite cholesky(corr_mat) def test_1d_np_fromfunction(self): """Test that the structure works with np.fromfunction. This is how the integration tests will get background covariances, so this needs to work. """ test_size = (200,) for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction .__subclasses__()): with self.subTest(corr_class=getname(corr_class)): # This fails with a correlation length of 5 corr_fun = corr_class(2.) corr = np.fromfunction(corr_fun.correlation_from_index, shape=test_size * 2, dtype=float) corr_mat = corr.reshape((np.prod(test_size),) * 2) # test postitive definite chol_upper = cholesky(corr_mat) # test symmetry np_tst.assert_allclose(chol_upper.T.dot(chol_upper), corr_mat, rtol=1e-4, atol=1e-4) def test_1d_make_matrix(self): """Test make_matrix for 1D correlations. Checks against original value. """ test_nt = 200 for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. __subclasses__()): for dist in (1, 5, 10, 30): with self.subTest(corr_class=getname(corr_class), dist=dist): if ( corr_class == atmos_flux_inversion.correlations. 
GaussianCorrelation ): raise unittest2.SkipTest( "Gaussian({0:d}) correlations ill-conditioned". format(dist) ) corr_fun = corr_class(dist) corr_mat = atmos_flux_inversion.correlations.make_matrix( corr_fun, test_nt ) # Make sure diagonal elements are ones np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6) # check if it matches the original np_tst.assert_allclose( corr_mat, np.fromfunction( corr_fun.correlation_from_index, (test_nt, test_nt) ).reshape((test_nt, test_nt)), # rtol=1e-13: Gaussian 10 and 15 fail # atol=1e-15: Gaussian 1 and 5 fail rtol=2e-7, atol=5e-7 ) # check if it actually is positive definite chol_upper = cholesky(corr_mat) # test symmetry np_tst.assert_allclose(chol_upper.T.dot(chol_upper), corr_mat, rtol=1e-4, atol=1e-4) def test_fft_correlation_structure(self): """Ensure the FFT-based operators satisfy conditions of correlation matrices. Checks for symmetry and ones on the diagonal. """ for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. __subclasses__()): for test_shape in ((300,), (20, 30)): test_size = int(np.prod(test_shape, dtype=int)) for dist in (1, 3, 10, 30): for is_cyclic in (True, False): corr_fun = corr_class(dist) corr_op = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_function(corr_fun, test_shape, is_cyclic)) # This is the fastest way to get column-major # order from da.eye. corr_mat = corr_op.dot(np.eye(test_size).T) with self.subTest( corr_class=getname(corr_class), dist=dist, test_shape=test_shape, is_cyclic=is_cyclic, test="symmetry"): np_tst.assert_allclose(corr_mat, corr_mat.T, rtol=1e-14, atol=1e-15) with self.subTest( corr_class=getname(corr_class), dist=dist, test_shape=test_shape, is_cyclic=is_cyclic, test="self-correlation"): np_tst.assert_allclose(np.diag(corr_mat), 1) def test_1d_fft_correlation_cyclic(self): """Test HomogeneousIsotropicCorrelation for cyclic 1D arrays. 
Check against `make_matrix` and ignore values near the edges of the domain where the two methods are different. """ test_nt = 512 test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt), np.eye(100, test_nt)[-1]) for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. __subclasses__()): for dist in (1, 3, 10): # Magic numbers # May need to increase for larger test_nt noncorr_dist = 20 + 8 * dist corr_fun = corr_class(dist) corr_mat = atmos_flux_inversion.correlations.make_matrix( corr_fun, test_nt) corr_op = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_function(corr_fun, test_nt)) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, inverse="no"): np_tst.assert_allclose( corr_op.dot(test_vec)[noncorr_dist:-noncorr_dist], corr_mat.dot(test_vec)[noncorr_dist:-noncorr_dist], rtol=1e-3, atol=1.5e-3) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, inverse="yes"): if ((corr_class is atmos_flux_inversion.correlations. GaussianCorrelation and dist >= 3)): # Gaussian(3) has FFT less # well-conditioned than make_matrix raise unittest2.SkipTest( "Gaussian({0:d}) correlations ill-conditioned". format(dist)) elif ((corr_class is atmos_flux_inversion.correlations. BalgovindCorrelation and dist == 10)): # This one distance is problematic # Roughly 3% of the points disagree # for the last half of the tests # I have no idea why raise unittest2.SkipTest( "Balgovind(10) correlations weird") np_tst.assert_allclose( corr_op.solve( test_vec)[noncorr_dist:-noncorr_dist], la.solve( corr_mat, test_vec)[noncorr_dist:-noncorr_dist], rtol=1e-3, atol=2e-3 ) def test_1d_fft_correlation_acyclic(self): """Test HomogeneousIsotropicCorrelation for acyclic 1D arrays. Check against `make_matrix` and ignore values near the edges of the domain where the two methods are different. 
""" test_nt = 512 test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt), np.eye(100, test_nt)[-1]) for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. __subclasses__()): for dist in (1, 3, 10): # Magic numbers # May need to increase for larger test_nt corr_fun = corr_class(dist) corr_mat = atmos_flux_inversion.correlations.make_matrix( corr_fun, test_nt) corr_op = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_function(corr_fun, test_nt, False)) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, inverse="no"): np_tst.assert_allclose( corr_op.dot(test_vec), corr_mat.dot(test_vec), rtol=1e-3, atol=1e-5) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, inverse="yes"): self.assertRaises( NotImplementedError, corr_op.solve, test_vec) def test_2d_fft_correlation_cyclic(self): """Test HomogeneousIsotropicCorrelation for cyclic 2D arrays. Check against `make_matrix` and ignore values near the edges where the two methods differ. """ test_shape = (20, 30) test_size = np.prod(test_shape) test_lst = (np.zeros(test_size), np.ones(test_size), np.arange(test_size), np.eye(10 * test_shape[0], test_size)[-1]) for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. __subclasses__()): for dist in (1, 3): # Magic numbers # May need to increase for larger domains noncorr_dist = 20 + 8 * dist corr_fun = corr_class(dist) corr_mat = atmos_flux_inversion.correlations.make_matrix( corr_fun, test_shape) corr_op = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. 
from_function(corr_fun, test_shape)) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, direction="forward"): np_tst.assert_allclose( corr_op.dot(test_vec).reshape(test_shape) [noncorr_dist:-noncorr_dist, noncorr_dist:-noncorr_dist], corr_mat.dot(test_vec).reshape(test_shape) [noncorr_dist:-noncorr_dist, noncorr_dist:-noncorr_dist], rtol=1e-3, atol=1e-5) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, direction="backward"): if ((corr_class is atmos_flux_inversion.correlations. GaussianCorrelation and dist >= 3)): # Gaussian(3) has FFT less # well-conditioned than make_matrix raise unittest2.SkipTest( "Gaussian({0:d}) correlations ill-conditioned". format(dist)) np_tst.assert_allclose( corr_op.solve( test_vec).reshape(test_shape) [noncorr_dist:-noncorr_dist, noncorr_dist:-noncorr_dist], la.solve( corr_mat, test_vec).reshape(test_shape) [noncorr_dist:-noncorr_dist, noncorr_dist:-noncorr_dist], rtol=1e-3, atol=1e-5) def test_2d_fft_correlation_acyclic(self): """Test HomogeneousIsotropicCorrelation for acyclic 2D arrays. Check against `make_matrix` and ignore values near the edges where the two methods differ. """ test_shape = (20, 30) test_size = np.prod(test_shape) test_lst = (np.zeros(test_size), np.ones(test_size), np.arange(test_size), np.eye(10 * test_shape[0], test_size)[-1]) for corr_class in ( atmos_flux_inversion.correlations.DistanceCorrelationFunction. __subclasses__()): for dist in (1, 3): # Magic numbers # May need to increase for larger domains corr_fun = corr_class(dist) corr_mat = atmos_flux_inversion.correlations.make_matrix( corr_fun, test_shape) corr_op = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. 
from_function(corr_fun, test_shape, False)) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, direction="forward"): np_tst.assert_allclose( corr_op.dot(test_vec).reshape(test_shape), corr_mat.dot(test_vec).reshape(test_shape), rtol=1e-3, atol=1e-5) for i, test_vec in enumerate(test_lst): with self.subTest(corr_class=getname(corr_class), dist=dist, test_num=i, direction="backward"): self.assertRaises( NotImplementedError, corr_op.solve, test_vec) def test_homogeneous_from_array_cyclic(self): """Make sure cyclic from_array can be roundtripped. Also tests that odd state sizes work. """ test_size = 25 corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation for dist in (1, 3, 5): with self.subTest(dist=dist): corr_fun = corr_class(dist) corr_op1 = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_function(corr_fun, test_size, True)) first_column = corr_op1.dot(np.eye(test_size, 1)[:, 0]) corr_op2 = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_array(first_column)) np_tst.assert_allclose( corr_op1.dot(np.eye(test_size)), corr_op2.dot(np.eye(test_size))) def test_kron_composition(self): """Test that `kron` works similar to composition of the domains.""" HomogeneousIsotropicCorrelation = ( atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation ) corr_class = atmos_flux_inversion.correlations.GaussianCorrelation corr_fun = corr_class(5) shape1 = (5,) shape2 = (7,) corr_op1 = (HomogeneousIsotropicCorrelation. from_function(corr_fun, shape1)) corr_op2 = (HomogeneousIsotropicCorrelation. from_function(corr_fun, shape2)) kron_corr = corr_op1.kron(corr_op2) direct_corr = (HomogeneousIsotropicCorrelation. 
from_function(corr_fun, shape1 + shape2)) self.assertEqual(kron_corr.shape, direct_corr.shape) self.assertEqual(kron_corr._underlying_shape, direct_corr._underlying_shape) np_tst.assert_allclose(kron_corr._corr_fourier, direct_corr._corr_fourier) np_tst.assert_allclose(kron_corr._fourier_near_zero, direct_corr._fourier_near_zero) def test_kron_results(self): """Test the Kronecker product implementation.""" HomogeneousIsotropicCorrelation = ( atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation) corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation test_shapes = (20, 25, (5, 6)) distances = (3, 5,) for dist1, shape1, dist2, shape2 in itertools.product( distances, test_shapes, repeat=2): with self.subTest(dist1=dist1, dist2=dist2): corr_fun1 = corr_class(dist1) corr_fun2 = corr_class(dist2) corr_op1 = ( HomogeneousIsotropicCorrelation. from_function(corr_fun1, shape1)) corr_op2 = ( HomogeneousIsotropicCorrelation. from_function(corr_fun2, shape2)) size1 = np.prod(shape1) size2 = np.prod(shape2) corr_mat1 = corr_op1.dot(np.eye(size1)) corr_mat2 = corr_op2.dot(np.eye(size2)) full_corr1 = corr_op1.kron(corr_op2) full_corr2 = scipy.linalg.kron(np.asarray(corr_mat1), np.asarray(corr_mat2)) self.assertIsInstance( corr_op1, HomogeneousIsotropicCorrelation) test_vec = np.arange(size1 * size2) np_tst.assert_allclose( full_corr1.dot(test_vec), full_corr2.dot(test_vec)) test_mat = np.eye(size1 * size2) np_tst.assert_allclose( full_corr1.dot(test_mat), full_corr2.dot(test_mat)) def test_kron_delegate(self): """Test that kron delegates where appropriate.""" op1 = (atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_array((1, .5, .25))) mat2 = np.eye(5) combined_op = op1.kron(mat2) self.assertIsInstance( combined_op, atmos_flux_inversion.linalg.SchmidtKroneckerProduct ) def test_sqrt_direct(self): """Test the square root in the most direct manner possible. 
Checks whether matrices corresponding to sqrt.T@sqrt and the original matrix are approximately equal. """ operator = (atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_array((1, .5, .25, .125))) sqrt = operator.sqrt() sqrt_squared = sqrt.T.dot(sqrt) mat = np.eye(4) np_tst.assert_allclose(operator.dot(mat), sqrt_squared.dot(mat)) def test_from_function_direct(self): """Directly test the output of from_function.""" corr_func = (atmos_flux_inversion.correlations. ExponentialCorrelation(1 / np.log(2))) from_function = ( atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation. from_function) toeplitz = scipy.linalg.toeplitz with self.subTest(is_cyclic=False, nd=1): corr_op = from_function(corr_func, [10], False) np_tst.assert_allclose( corr_op.dot(np.eye(10)), toeplitz(0.5 ** np.arange(10))) with self.subTest(is_cyclic=False, nd=2): corr_op = from_function(corr_func, [2, 3], False) same_row = toeplitz(0.5 ** np.array([0, 1, 2])) other_row = toeplitz( 0.5 ** np.array([1, np.sqrt(2), np.sqrt(5)])) np_tst.assert_allclose( corr_op.dot(np.eye(6)), np.block([[same_row, other_row], [other_row, same_row]])) corr_op = from_function(corr_func, [4, 6], False) same_row = toeplitz(0.5 ** np.arange(6)) next_row = toeplitz( 0.5 ** np.array([1, np.sqrt(2), np.sqrt(5), np.sqrt(10), np.sqrt(17), np.sqrt(26)])) row_after_next = toeplitz( 0.5 ** np.array([2, np.sqrt(5), np.sqrt(8), np.sqrt(13), np.sqrt(20), np.sqrt(29)])) two_rows_on = toeplitz( 0.5 ** np.array([3, np.sqrt(10), np.sqrt(13), np.sqrt(18), 5, np.sqrt(34)])) np_tst.assert_allclose( corr_op.dot(np.eye(24)), np.block([[same_row, next_row, row_after_next, two_rows_on], [next_row, same_row, next_row, row_after_next], [row_after_next, next_row, same_row, next_row], [two_rows_on, row_after_next, next_row, same_row]])) with self.subTest(is_cyclic=True, nd=1): corr_op = from_function(corr_func, [10], True) np_tst.assert_allclose( corr_op.dot(np.eye(10)), toeplitz( 0.5 ** np.array([0, 1, 2, 3, 4, 5, 4, 
3, 2, 1]))) with self.subTest(is_cyclic=True, nd=2): corr_op = from_function(corr_func, [4, 6]) same_row = toeplitz( 0.5 ** np.array([0, 1, 2, 3, 2, 1])) next_row = toeplitz( 0.5 ** np.array([1, np.sqrt(2), np.sqrt(5), np.sqrt(10), np.sqrt(5), np.sqrt(2)])) row_after_next = toeplitz( 0.5 ** np.array([2, np.sqrt(5), np.sqrt(8), np.sqrt(13), np.sqrt(8), np.sqrt(5)])) np_tst.assert_allclose( corr_op.dot(np.eye(24)), np.block([[same_row, next_row, row_after_next, next_row], [next_row, same_row, next_row, row_after_next], [row_after_next, next_row, same_row, next_row], [next_row, row_after_next, next_row, same_row]])) def test_inv(self): """Test inverse matches linalg.""" corr_func = (atmos_flux_inversion.correlations. ExponentialCorrelation(1 / np.log(2))) from_function = ( atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation. from_function) for test_shape in (10, 11, (3, 3), (4, 4)): with self.subTest(test_shape=test_shape): corr_op = from_function(corr_func, test_shape) test_size = np.prod(test_shape) ident = np.eye(test_size) np_tst.assert_allclose( corr_op.inv().dot(ident), la.inv(corr_op.dot(ident)), rtol=1e-5, atol=1e-5) def test_acyclic_inv_fails(self): """Test inverse fails for acyclic correlations.""" corr_func = (atmos_flux_inversion.correlations. ExponentialCorrelation(1 / np.log(2))) from_function = ( atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation. from_function) for test_shape in (10, 11, (3, 3), (4, 4)): with self.subTest(test_shape=test_shape): corr_op = from_function(corr_func, test_shape, is_cyclic=False) self.assertRaises( NotImplementedError, corr_op.inv) def test_wrong_shape_fails(self): """Test that a vector of the wrong shape fails noisily.""" corr_func = (atmos_flux_inversion.correlations. ExponentialCorrelation(2)) corr_op = ( atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation. 
from_function(corr_func, (3, 4))) self.assertRaises( ValueError, corr_op.solve, np.arange(5)) def test_cyclic_from_array(self): """Test from_array with assumed cyclic correlations.""" array = [1, .5, .25, .125, .0625, .125, .25, .5] op = (atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_array(array)) mat = scipy.linalg.toeplitz(array) np_tst.assert_allclose(op.dot(np.eye(*mat.shape)), mat) def test_acyclic_from_array(self): """Test from_array with correlations assumed acyclic.""" array = [1, .5, .25, .125, .0625, .03125] op = (atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_array(array, False)) mat = scipy.linalg.toeplitz(array) np_tst.assert_allclose(op.dot(np.eye(*mat.shape)), mat) @unittest2.skipUnless(HAVE_SPARSE, "sparse not installed") def test_sparse(self): """Test HomogeneousIsotropicCorrelations work on sparse.COO.""" array = 2. ** -np.arange(6) op = (atmos_flux_inversion.correlations. HomogeneousIsotropicCorrelation. from_array(array, False)) mat = scipy.linalg.toeplitz(array) np_tst.assert_allclose(op.dot(sparse.eye(*mat.shape)), mat) class TestSchmidtKroneckerProduct(unittest2.TestCase): """Test the Schmidt Kronecker product implementation for LinearOperators. This class tests the implementation based on the Schmidt decomposition. """ def test_identity(self): """Test that the implementation works with identity matrices.""" test_sizes = (4, 5) SchmidtKroneckerProduct = ( atmos_flux_inversion.linalg.SchmidtKroneckerProduct) # I want to be sure either being smaller works. 
# Even versus odd also causes problems occasionally for size1, size2 in itertools.product(test_sizes, repeat=2): with self.subTest(size1=size1, size2=size2): mat1 = np.eye(size1) mat2 = np.eye(size2) full_mat = SchmidtKroneckerProduct( mat1, mat2) big_ident = np.eye(size1 * size2) np_tst.assert_allclose( full_mat.dot(big_ident), big_ident) def test_identical_submatrices(self): """Test whether the implementation will generate identical blocks.""" mat1 = np.ones((3, 3)) mat2 = ((1, .5, .25), (.5, 1, .5), (.25, .5, 1)) np_tst.assert_allclose( atmos_flux_inversion.linalg.SchmidtKroneckerProduct( mat1, mat2).dot(np.eye(9)), np.tile(mat2, (3, 3))) def test_constant_blocks(self): """Test whether the implementation will produce constant blocks.""" mat1 = ((1, .5, .25), (.5, 1, .5), (.25, .5, 1)) mat2 = np.ones((3, 3)) np_tst.assert_allclose( atmos_flux_inversion.linalg.SchmidtKroneckerProduct( mat1, mat2).dot(np.eye(9)), np.repeat(np.repeat(mat1, 3, 0), 3, 1)) def test_entangled_state(self): """Test whether the implementation works with entangled states.""" sigmax = np.array(((0, 1), (1, 0))) sigmaz = np.array(((1, 0), (0, -1))) operator = atmos_flux_inversion.linalg.SchmidtKroneckerProduct( sigmax, sigmaz) matrix = scipy.linalg.kron(sigmax, sigmaz) # (k01 - k10) / sqrt(2) epr_state = (0, .7071, -.7071, 0) np_tst.assert_allclose( operator.dot(epr_state), matrix.dot(epr_state)) def test_drop_small(self): """Test that the implementation properly drops small components.""" SchmidtKroneckerProduct = ( atmos_flux_inversion.linalg.SchmidtKroneckerProduct) # I want to be sure either being smaller works. 
# Even versus odd also causes problems occasionally mat1 = np.eye(2) mat2 = np.eye(3) full_mat = SchmidtKroneckerProduct( mat1, mat2) test_vec = np.array([1, 0, 0, 0, 1e-15, 0]) np_tst.assert_allclose( full_mat.dot(test_vec), np.eye(6, 1)[:, 0]) def test_transpose(self): """Test that SchmidtKroneckerProduct can be transposed.""" mat1 = np.eye(2) mat2 = np.eye(3) op = atmos_flux_inversion.linalg.SchmidtKroneckerProduct( mat1, mat2 ) op_transpose = op.T np_tst.assert_allclose( op_transpose.dot(np.eye(6)), np.eye(6)) class TestYMKroneckerProduct(unittest2.TestCase): """Test the YM13 Kronecker product implementation for LinearOperators. This tests the :class:`~atmos_flux_inversion.linalg.DaskKroneckerProductOperator` implementation based on the algorithm in Yadav and Michalak (2013) """ def test_identity(self): """Test that the implementation works with identity matrices.""" test_sizes = (4, 5) DaskKroneckerProductOperator = ( atmos_flux_inversion.linalg.DaskKroneckerProductOperator) # I want to be sure either being smaller works. 
# Even versus odd also causes problems occasionally for size1, size2 in itertools.product(test_sizes, repeat=2): with self.subTest(size1=size1, size2=size2): mat1 = np.eye(size1) mat2 = np.eye(size2) full_mat = DaskKroneckerProductOperator( mat1, mat2) big_ident = np.eye(size1 * size2) np_tst.assert_allclose( full_mat.dot(big_ident), big_ident) def test_identical_submatrices(self): """Test whether the implementation will generate identical blocks.""" mat1 = np.ones((3, 3)) mat2 = np.array(((1, .5, .25), (.5, 1, .5), (.25, .5, 1))) np_tst.assert_allclose( atmos_flux_inversion.linalg.DaskKroneckerProductOperator( mat1, mat2).dot(np.eye(9)), np.tile(mat2, (3, 3))) def test_constant_blocks(self): """Test whether the implementation will produce constant blocks.""" mat1 = np.array(((1, .5, .25), (.5, 1, .5), (.25, .5, 1))) mat2 = np.ones((3, 3)) np_tst.assert_allclose( atmos_flux_inversion.linalg.DaskKroneckerProductOperator( mat1, mat2).dot(np.eye(9)), np.repeat(np.repeat(mat1, 3, 0), 3, 1)) def test_entangled_state(self): """Test whether the implementation works with entangled states.""" sigmax = np.array(((0, 1), (1, 0))) sigmaz = np.array(((1, 0), (0, -1))) operator = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( sigmax, sigmaz) matrix = scipy.linalg.kron(sigmax, sigmaz) # (k01 - k10) / sqrt(2) epr_state = (0, .7071, -.7071, 0) np_tst.assert_allclose( operator.dot(epr_state), matrix.dot(epr_state)) @unittest2.skipUnless(HAVE_SPARSE, "sparse not installed") def test_sparse(self): """Test that DaskKroneckerProductOperator works on sparse.COO.""" sigmax = np.array(((0, 1), (1, 0))) sigmaz = np.array(((1, 0), (0, -1))) operator = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( sigmax, sigmaz) matrix = scipy.linalg.kron(sigmax, sigmaz) epr_state = np.array((0, .7071, -.7071, 0)) np_tst.assert_allclose( operator.dot(sparse.COO(epr_state)), matrix.dot(epr_state)) def test_transpose(self): """Test whether the transpose is properly implemented.""" mat1 = 
np.eye(3) mat2 = atmos_flux_inversion.covariances.DiagonalOperator((1, 1)) mat3 = np.eye(4) DaskKroneckerProductOperator = ( atmos_flux_inversion.linalg.DaskKroneckerProductOperator) with self.subTest(check="symmetric"): product = DaskKroneckerProductOperator( mat1, mat2) self.assertIs(product.T, product) with self.subTest(check="asymmetric1"): mat1[0, 1] = 1 product = DaskKroneckerProductOperator( mat1, mat2) transpose = product.T self.assertIsNot(transpose, product) np_tst.assert_allclose(transpose._operator1, mat1.T) with self.subTest(check="asymmetric2"): product = DaskKroneckerProductOperator( mat3, mat1) transpose = product.T self.assertIsNot(transpose, product) self.assertIs(transpose._operator1, mat3) np_tst.assert_allclose(transpose._operator2.A, mat1.T) with self.subTest(check="asymmetric3"): product = DaskKroneckerProductOperator( mat1, mat1) transpose = product.T np_tst.assert_allclose(transpose._operator1, mat1.T) np_tst.assert_allclose(transpose._operator2.A, mat1.T) with self.subTest(check="rectangular"): product = DaskKroneckerProductOperator( mat1[:2], mat3[:3]) transpose = product.T np_tst.assert_allclose(transpose._operator1, mat1[:2].T) np_tst.assert_allclose(transpose._operator2.A, mat3[:3].T) def test_sqrt(self): """Test whether the sqrt method works as intended.""" matrix1 = np.eye(2) matrix2 = atmos_flux_inversion.covariances.DiagonalOperator((1, 2, 3)) tester = np.eye(6) product = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( matrix1, matrix2) sqrt = product.sqrt() proposed = sqrt.T.dot(sqrt) np_tst.assert_allclose(proposed.dot(tester), product.dot(tester)) # Should I check the submatrices or assume that's covered? 
def test_quadratic_form(self): """Test whether quadratic_form returns the intended result.""" matrix1 = scipy.linalg.toeplitz((1., 1/3., 1/9., 1/27., 1/81.)) # noqa matrix2 = scipy.linalg.toeplitz((1., .5, .25, .125, .0625, .03125)) product = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( matrix1, matrix2) tester = np.eye(product.shape[0]) dense_product = scipy.linalg.kron(matrix1, matrix2) test_vec = np.arange(product.shape[0]) np_tst.assert_allclose(product.quadratic_form(tester), dense_product) np_tst.assert_allclose(product.quadratic_form(test_vec), test_vec.dot(dense_product.dot(test_vec))) test_op = atmos_flux_inversion.linalg.DiagonalOperator(test_vec) self.assertRaises( TypeError, product.quadratic_form, test_op) self.assertRaises( ValueError, product.quadratic_form, test_vec[:-1]) @unittest2.skipUnless(HAVE_SPARSE, "sparse not installed") def test_quadratic_form_sparse(self): """Test that quadratic_form works on sparse.COO.""" matrix1 = scipy.linalg.toeplitz(3. ** -np.arange(4)) matrix2 = scipy.linalg.toeplitz(5. ** -np.arange(5)) product = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( matrix1, matrix2) tester = sparse.eye(product.shape[0]) dense_product = scipy.linalg.kron(matrix1, matrix2) np_tst.assert_allclose(product.quadratic_form(tester), dense_product) def test_matrix_linop(self): """Test that the implementation works with MatrixLinearOperator.""" test_sizes = (4, 5) DaskKroneckerProductOperator = ( atmos_flux_inversion.linalg.DaskKroneckerProductOperator) # I want to be sure either being smaller works. 
# Even versus odd also causes problems occasionally for size1, size2 in itertools.product(test_sizes, repeat=2): with self.subTest(size1=size1, size2=size2): mat1 = tolinearoperator(np.eye(size1)) mat2 = np.eye(size2) full_mat = DaskKroneckerProductOperator( mat1, mat2) big_ident = np.eye(size1 * size2) np_tst.assert_allclose( full_mat.dot(big_ident), big_ident) def test_fails_not_array(self): """Test for failure if the first operator is not an array. The implementation requires it. The implementation should fail quickly, not slowly. """ mat1 = atmos_flux_inversion.linalg.DiagonalOperator(np.arange(10)) mat2 = np.eye(3) self.assertRaises( ValueError, atmos_flux_inversion.linalg.DaskKroneckerProductOperator, mat1, mat2) def test_sqrt_fails(self): """Test that the square root fails for bad inputs. Specifically, non-square arrays and asymmetric arrays. """ kron_op = atmos_flux_inversion.linalg.DaskKroneckerProductOperator self.assertRaises( ValueError, kron_op(np.eye(3, 2), np.eye(3)).sqrt) self.assertRaises( ValueError, kron_op(np.eye(3), np.eye(2, 3)).sqrt) self.assertRaises( ValueError, kron_op(np.array([[1, 1], [0, 1]]), np.eye(3)).sqrt) @unittest2.skipUnless(HAVE_SPARSE, "sparse not installed") def test_sparse_first_argument(self): """Test sparse.COO in the first position.""" row = np.exp(-np.arange(20)) row[row < 0.005] = 0 matrix1 = scipy.linalg.toeplitz(row) operator1 = sparse.COO(matrix1) operator2 = sparse.eye(15) kron_op = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( operator1, operator2) kron_mat = scipy.linalg.kron(matrix1, operator2.todense()) np_tst.assert_allclose( kron_op.dot(np.eye(kron_op.shape[0])), kron_mat) np_tst.assert_allclose( kron_op.dot(sparse.eye(kron_op.shape[0])).todense(), kron_mat) @unittest2.skipUnless(HAVE_SPARSE, "sparse not installed") @unittest2.expectedFailure def test_sparse_kron_quadratic_form(self): """Test that quadratic form of all sparse works.""" row = np.exp(-np.arange(20)) row[row < 0.005] = 0 matrix1 = 
scipy.linalg.toeplitz(row) operator1 = sparse.COO(row) operator2 = sparse.eye(15) kron_op = atmos_flux_inversion.linalg.DaskKroneckerProductOperator( operator1, operator2) kron_mat = scipy.linalg.kron(matrix1, operator2.todense()) np_tst.assert_allclose( kron_op.quadratic_form(sparse.eye(kron_op.shape[0])).todense(), kron_mat) class TestUtilKroneckerProduct(unittest2.TestCase): """Test atmos_flux_inversion.util.kronecker_product.""" def test_delegation(self): """Test that it delegates to subclasses where appropriate.""" HomogeneousIsotropicCorrelation = ( atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation) corr_class = atmos_flux_inversion.correlations.GaussianCorrelation corr_fun = corr_class(5) op1 = HomogeneousIsotropicCorrelation.from_function(corr_fun, 15) op2 = HomogeneousIsotropicCorrelation.from_function(corr_fun, 20) combined_op = atmos_flux_inversion.util.kronecker_product(op1, op2) proposed_result = HomogeneousIsotropicCorrelation.from_function( corr_fun, (15, 20)) self.assertIsInstance(combined_op, HomogeneousIsotropicCorrelation) self.assertSequenceEqual(combined_op.shape, tuple(np.multiply(op1.shape, op2.shape))) self.assertEqual(combined_op._underlying_shape, proposed_result._underlying_shape) np_tst.assert_allclose(combined_op._fourier_near_zero, proposed_result._fourier_near_zero) np_tst.assert_allclose(combined_op._corr_fourier, proposed_result._corr_fourier, rtol=1e-5, atol=1e-6) def test_array_array(self): """Test array-array Kronecker product.""" mat1 = np.eye(2) mat2 = np.eye(3) combined_op = atmos_flux_inversion.util.kronecker_product(mat1, mat2) self.assertIsInstance(combined_op, np.ndarray) self.assertSequenceEqual(combined_op.shape, tuple(np.multiply(mat1.shape, mat2.shape))) np_tst.assert_allclose(combined_op, scipy.linalg.kron(mat1, mat2)) def test_large_array_array(self): """Test large array-array Kronecker products. At some point it becomes faster to use Y&M kronecker representation than the dense one. 
""" mat1 = np.eye(1 << 5) mat2 = np.eye(1 << 6) combined = atmos_flux_inversion.util.kronecker_product(mat1, mat2) self.assertIsInstance( combined, atmos_flux_inversion.linalg.DaskKroneckerProductOperator) self.assertSequenceEqual(combined.shape, tuple(np.multiply(mat1.shape, mat2.shape))) def test_array_sparse(self): """Test array-sparse matrix Kronecker products.""" mat1 = np.eye(3) mat2 = scipy.sparse.eye(10) combined_op = atmos_flux_inversion.util.kronecker_product(mat1, mat2) big_ident = np.eye(30) self.assertIsInstance( combined_op, atmos_flux_inversion.linalg.DaskKroneckerProductOperator ) self.assertSequenceEqual(combined_op.shape, tuple(np.multiply(mat1.shape, mat2.shape))) np_tst.assert_allclose(combined_op.dot(big_ident), big_ident) def test_linop_array(self): """Test linop-sparse Kronecker products.""" op1 = atmos_flux_inversion.linalg.DiagonalOperator(np.arange(15)) mat2 = np.eye(10) combined_op = atmos_flux_inversion.util.kronecker_product(op1, mat2) self.assertIsInstance( combined_op, atmos_flux_inversion.linalg.SchmidtKroneckerProduct ) self.assertSequenceEqual(combined_op.shape, tuple(np.multiply(op1.shape, mat2.shape))) class TestUtilSchmidtDecomposition(unittest2.TestCase): """Test the Schimdt decomposition code in atmos_flux_inversion.linalg.""" def setUp(self): """Set up the test vectors.""" from scipy.linalg import kron # The notation here is borrowed from quantum computation. I # use the k prefix to indicate the vector has precisely one # nonzero entry, a one. The digits following are the binary # representation of the zero-based index of that one. 
self.k0 = np.array((1, 0)).reshape(-1, 1) self.k1 = np.array((0, 1)).reshape(-1, 1) self.k00 = kron(self.k0, self.k0) self.k01 = kron(self.k0, self.k1) self.k10 = kron(self.k1, self.k0) self.k11 = kron(self.k1, self.k1) self.k000 = kron(self.k0, self.k00) self.k001 = kron(self.k0, self.k01) self.k010 = kron(self.k0, self.k10) self.k011 = kron(self.k0, self.k11) self.k100 = kron(self.k1, self.k00) self.k101 = kron(self.k1, self.k01) self.k110 = kron(self.k1, self.k10) self.k111 = kron(self.k1, self.k11) def test_simple_combinations(self): """Test many combinations of vectors.""" possibilities = ( self.k0, self.k1, self.k00, self.k01, self.k10, self.k11) for vec1, vec2 in itertools.product(possibilities, possibilities): with self.subTest(vec1=vec1[:, 0], vec2=vec2[:, 0]): composite_state = scipy.linalg.kron(vec1, vec2) lambdas, vecs1, vecs2 = ( atmos_flux_inversion.linalg.schmidt_decomposition( composite_state, vec1.shape[0], vec2.shape[0])) np_tst.assert_allclose(np.nonzero(lambdas), [[0]]) np_tst.assert_allclose(np.abs(vecs1[0]), vec1[:, 0]) np_tst.assert_allclose(np.abs(vecs2[0]), vec2[:, 0]) np_tst.assert_allclose( lambdas[0] * scipy.linalg.kron( np.asarray(vecs1[:1].T), np.asarray(vecs2[:1].T)), composite_state) def test_composite_compination(self): """Test composite combinations.""" sqrt2 = math.sqrt(2) rsqrt2 = 1 / sqrt2 # b00 = (k00 + k11) / sqrt2 # b01 = (k00 - k11) / sqrt2 # b10 = (k01 + k10) / sqrt2 # b11 = (k01 - k10) / sqrt2 composite_state = ( scipy.linalg.kron(self.k0, self.k00) + scipy.linalg.kron(self.k1, self.k01)) / sqrt2 res_lambda, res_vec1, res_vec2 = ( atmos_flux_inversion.linalg.schmidt_decomposition( composite_state, 2, 4)) self.assertEqual(res_vec1.shape, (2, 2)) self.assertEqual(res_vec2.shape, (2, 4)) np_tst.assert_allclose(res_lambda, (rsqrt2, rsqrt2)) np_tst.assert_allclose( sum(lambd * scipy.linalg.kron( np.asarray(vec1).reshape(-1, 1), np.asarray(vec2).reshape(-1, 1)) for lambd, vec1, vec2 in zip(res_lambda, res_vec1, res_vec2)), 
composite_state) def test_epr_state(self): """Test that it correctly decomposes the EPR state.""" sqrt2o2 = math.sqrt(2) / 2 epr_state = (self.k01 - self.k10) * sqrt2o2 lambdas, vecs1, vecs2 = ( atmos_flux_inversion.linalg.schmidt_decomposition( epr_state, 2, 2 ) ) lambdas = np.asarray(lambdas) vecs1 = np.asarray(vecs1) vecs2 = np.asarray(vecs2) self.assertEqual(len(lambdas), 2) # This will not recover the original decomposition np_tst.assert_allclose(lambdas, (sqrt2o2, sqrt2o2)) self.assertAlmostEqual(np.prod(lambdas), .5) for vec1, vec2 in zip(vecs1, vecs2): if np.allclose(np.abs(vec1), self.k0[:, 0]): sign = 1 else: sign = -1 np_tst.assert_allclose(vec1, sign * vec2[-1::-1]) np_tst.assert_allclose( sum(lambd * scipy.linalg.kron( np.asarray(vec1).reshape(-1, 1), np.asarray(vec2).reshape(-1, 1)) for lambd, vec1, vec2 in zip(lambdas, vecs1, vecs2)), epr_state) def test_failure(self): """Test that schmidt_decomposition fails on invalid input.""" schmidt_decomp = atmos_flux_inversion.linalg.schmidt_decomposition schmidt_decomp(np.eye(6, 1), 2, 3) schmidt_decomp(np.arange(6), 2, 3) self.assertRaises( ValueError, schmidt_decomp, np.eye(6, 2), 2, 3) def test_big_vector(self): """Test size of results for large vectors.""" vec = np.arange(1000, dtype=float) lambdas, uvecs, vvecs = ( atmos_flux_inversion.linalg.schmidt_decomposition(vec, 10, 100)) self.assertLessEqual(len(lambdas), 10) self.assertNotIn(0, lambdas) np_tst.assert_allclose( sum(lambd[...] 
* scipy.linalg.kron( vec1.reshape(-1, 1), vec2.reshape(-1, 1))[:, 0] for lambd, vec1, vec2 in zip(lambdas, uvecs, vvecs)), vec, atol=1e-10) def test_small_nonzero(self): """Test that all returned data is significant.""" vec = np.eye(20, 1) lambdas, uvecs, vvecs = ( atmos_flux_inversion.linalg.schmidt_decomposition(vec, 4, 5)) self.assertNotIn(0, lambdas) def test_zeros(self): """Test that function gives sensible output on zero input.""" vec = np.zeros(20) lambdas, uvecs, vvecs = ( atmos_flux_inversion.linalg.schmidt_decomposition(vec, 4, 5) ) self.assertSequenceEqual(lambdas, [0]) class TestUtilIsOdd(unittest2.TestCase): """Test atmos_flux_inversion.linalg.is_odd.""" MAX_TO_TEST = 100 def test_known_odd(self): """Test known odd numbers.""" is_odd = atmos_flux_inversion.linalg_interface.is_odd for i in range(1, self.MAX_TO_TEST, 2): with self.subTest(i=i): self.assertTrue(is_odd(i)) def test_known_even(self): """Test known even numbers.""" is_odd = atmos_flux_inversion.linalg_interface.is_odd for i in range(0, self.MAX_TO_TEST, 2): with self.subTest(i=i): self.assertFalse(is_odd(i)) class TestUtilToLinearOperator(unittest2.TestCase): """Test atmos_flux_inversion.linalg.tolinearoperator.""" def test_tolinearoperator(self): """Test that tolinearoperator returns LinearOperators.""" tolinearoperator = atmos_flux_inversion.linalg.tolinearoperator for trial in (0, 1., (0, 1), [0, 1], ((1, 0), (0, 1)), [[0, 1.], [1., 0]], np.arange(5), scipy.sparse.identity(8),
np.arange(10)
numpy.arange
''' Comparison of Continuation/Generalized-inverse (G/Ginv) method and Continuation/Generalized Minimum RESidual (C/GMRES) method Two-link Arm system Made in Feb. 2022 ver. 0.1 Fer. 2022 ver. 0.1.1 Bug fixed. BSD 2-Clause License Copyright (c) 2022, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
''' import numpy as np import matplotlib.pyplot as plt from CGinv import C_Ginv from CGMRES import C_GMRES import time ########################### ## simulation parameters ## ########################### ################################## ## common simulation parameters ## ################################## state_dim=4 # state dimension input_dim=2 # input dimension t0=0.0 # initial time [s] N=4 # Integration steps within the MPC computation dt=0.01 # Time step for evolution of actual time [s] Tf=1.0 # Simulation duration [s] max_iter=int((Tf-t0)/dt)+1 # iteration of simulation (for loop iteration) #################### ## Initial state ## #################### x_init=np.zeros(state_dim) x_init[0]=-np.pi/180*45 x_init[1]=-np.pi/180*60 x_init[0]=-np.pi/180*45 x_init[1]=-np.pi/180*60 ################### ## target state ## ################### x_ref=np.zeros(state_dim) ############################################# ############################################# ############################################# ## Parameters for C/Ginv simulation ## ############################################# ############################################# ############################################# diff_order=2 # k of u^(k)=0 T_CGinv=0.13 # Prediction horizon [s] for C/Ginv zeta_CGinv=1/dt # coefficient for continuation method ## parameters for Gauss-Newton methods tol_CGinv = 1e-5 # terminates iteration when norm(Func) < tol max_iter_GaussNewton = 15 # maximum iteration of Gauss-Newton method k_CGinv = 1 # damping coefficient inside Gauss-Newton method ############################################# ############################################# ############################################# ## Parameters for C/GMRES simulation ## ############################################# ############################################# T_CGMRES=0.24 # Prediction horizon [s] for C/GMRES zeta_CGMRES=1/dt # coefficient for continuation method ######################################## ## J= x(t+T)^T*S*x(t+T)/2 ## 
## +Int[x^T*Q*x/2+u^T*R*u/2]dt ## ######################################## Q=np.eye(state_dim, state_dim) R=np.eye(input_dim, input_dim) S=np.eye(state_dim, state_dim) Q[0,0]=40 Q[1,1]=20 Q[2,2]=0.01 Q[3,3]=0.01 R[0,0]=0.1 R[1,1]=0.1 S[0,0]=4 S[1,1]=2 S[2,2]=0.001 S[3,3]=0.001 Q[0,0]=40 Q[1,1]=20 Q[2,2]=0.01 Q[3,3]=0.01 R[0,0]=0.07 R[1,1]=0.07 S[0,0]=4 S[1,1]=2 S[2,2]=0.001 S[3,3]=0.001 ## parameters for Iteration methods tol_CGMRES = 1e-5 # terminates iteration when norm(Func) < tol max_iter_Newton = 15 # maximum iteration of Gauss-Newton method max_iter_FDGMRES = 2 # maximum iteration of Gauss-Newton method k_CGMRES = 1 # damping coefficient inside Gauss-Newton method ## file_name for saving graphs ## file_name='Compare_Two-linkArm_TGinv'+str(T_CGinv)+'TGMRES'+str(T_CGMRES)+'N'+str(N)+'dt'+str(dt) ################################## ## u^(2)=0 ### ################################## def dUdt_2nd_order(U): dUdt=np.zeros(U.shape[0]) dUdt[0]=U[2] dUdt[1]=U[3] dUdt[2]=0 dUdt[3]=0 return dUdt ################################## ## u^(3)=0 ### ################################## def dUdt_3rd_order(U): dUdt=np.zeros(U.shape[0]) dUdt[0]=U[2] dUdt[1]=U[3] dUdt[2]=U[4] dUdt[3]=U[5] dUdt[4]=0 dUdt[5]=0 return dUdt ################################## ## u^(K)=0 ### ################################## class dUdt: def __init__(self, diff_odr, input_dim): self.diff_odr=diff_odr self.input_dim=input_dim def Kth_order(self, U): dUdt=np.zeros(U.shape[0]) diff_order=self.diff_odr input_dim=self.input_dim for i in range(diff_order-1): for j in range(input_dim): dUdt[input_dim*i+j]=U[input_dim+input_dim*i+j] for i in range(input_dim): dUdt[input_dim*diff_order-1-i]=0 return dUdt ########################### ## Two-link arm system ## ########################### ## system parameters ## ####################### m1=0.25#[kg] l1=0.5#[m] I1=0.0125#[kgm^2] m2=0.25#[kg] l2=0.5#[m] I2=0.0125#[kgm^2] ###################################### ## state func of Double pendulumn ### 
###################################### def plant(t, x, u): dxdt = np.zeros(x.shape[0]) Mat=np.zeros([2,2]) Mat[0,0]=(m1/4+m2)*l1*l1+I1 Mat[0,1]=m2*l1*l2/2*np.cos(x[0]-x[1]) Mat[1,0]=Mat[0,1] Mat[1,1]= m2/4*l2*l2+I2 C=np.zeros(2) C[0]=-m2*l1*l2/2*x[3]**2*np.sin(x[0]-x[1])\ +u[0]-u[1] C[1]= m2*l1*l2/2*x[2]**2*np.sin(x[0]-x[1])\ +u[1] tmp=np.linalg.solve(Mat,C) dxdt[0]=x[2] dxdt[1]=x[3] dxdt[2]=tmp[0] dxdt[3]=tmp[1] return dxdt ############################ ############################ ############################ ############################ ## C/Ginv simulation part ## ############################ ############################ ############################ ############################ ########################### ## Controller definition ## ########################### UFunc=dUdt(diff_order, input_dim) #CGinv=C_Ginv(plant, dUdt_2nd_order, input_dim) #CGinv=C_Ginv(plant, dUdt_3th_order, input_dim) Ctrler=C_Ginv(plant, UFunc.Kth_order, input_dim) ################# ## state : x ## ################# x=np.zeros([max_iter+1,state_dim]) x[0,:]=x_init ############## ## input: u ## ############## u=np.zeros([max_iter+1,input_dim]) ############################################################ ## Big U: U:=[u, u', u", u^(3),...,u^(diff_order-1)]^T ## ############################################################ U_init =np.zeros(diff_order*input_dim) ############# ## time: t ## ############# t=np.zeros(max_iter+1) t[0]=t0 ################################# ## list for graph of calc_time ## ################################# t_list=[] calc_time_list=[] ################################### ## variable for timing calc_time ## ################################### t_start=None t_end=None ############################ ### Start #### ### MPC simulation #### ### by C/Ginv #### ############################ ################### ### start loop #### ################### ### loop 0 #### ################### ############################ ### MPC computation #### ############################ t_start = 
time.time() u[0] = Ctrler.u_init(x[0], x_ref, t[0], T_CGinv, U_init, N, tolerance=tol_CGinv, max_iter=max_iter_GaussNewton, k=k_CGinv) t_end = time.time() calc_time_list.append(t_end-t_start) t_list.append(t[0]) ## displaying some results ## print('t:{:.2g}'.format(t[0]),'[s] | u[',0,'] =',u[0]) print(' F(t,x,U):evaluation_count =',Ctrler.F.eval_count,'times') print(' calc time ={:.4g}'.format(t_end-t_start),'[s]') print(' N =',N,', Horizon=',T_CGinv,'[s]') F=Ctrler.F(t[0],x[0],Ctrler.U) print(' |F(t,x,U)|=',np.linalg.norm(F)) ##################################### ### time evolution of real plant #### ##################################### x[1] = x[0] + plant(t[0],x[0],u[0]) * dt t[1] = t[0] + dt ## printing how close we are to the target state ## print('|x[0]-x_ref| =',np.linalg.norm(x[1]-x_ref)) print() ## resetting count of evaluation of F(t,x,U) ## Ctrler.F.eval_count = 0 ############################ ### loops 1 ~ max_iter #### ############################ for i in range(1,max_iter): ############################ ### MPC computation #### ############################ t_start = time.time() u[i] = Ctrler.u(x[i],x_ref,t[i],T_CGinv,Ctrler.U,N,dt,zeta_CGinv) t_end = time.time() calc_time_list.append(t_end-t_start) t_list.append(t[i]) ## displaying some results ## print('t:{:.5g}'.format(t[i]),'[s] | u[',i,'] =',u[i]) print(' F(t,x,U):evaluation_count =',Ctrler.F.eval_count,'times') print(' calc time ={:.4g}'.format(t_end-t_start),'[s]') print(' N =',N,', Horizon=',T_CGinv,'[s]') F=Ctrler.F(t[i],x[i],Ctrler.U) print(' |F(t,x,U)|=',np.linalg.norm(F)) print(' |x[',i,']-x_ref|=',np.linalg.norm(x[i]-x_ref)) print() ##################################### ### time evolution of real plant #### ##################################### x[i+1]=x[i]+plant(t[i],x[i], u[i])*dt t[i+1]=t[i]+dt ## resetting count of evaluation of F(t,x,U) ## Ctrler.F.eval_count=0 ## displaying calculation time results ## calc_time_CGinv=np.array(calc_time_list) max_index_CGinv=np.argmax(calc_time_CGinv) 
min_index_CGinv=np.argmin(calc_time_CGinv) avg_calc_time_CGinv=
np.mean(calc_time_CGinv)
numpy.mean
#copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import numpy as np import time import tensorflow as tf from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig import glob import os import sys from npu_bridge.estimator import npu_ops from dllogger.logger import LOGGER import dllogger.logger as dllg #input_shape = [512, 512, 1] # (height, width, channel) # 用户自定义模型路径、输入、输出 model_path='./unet-industrial_tf.pb' input_tensor_name='input:0' output_tensor_name='output:0' class Classifier(object): def __init__(self): config = tf.ConfigProto() custom_op = config.graph_options.rewrite_options.custom_optimizers.add() custom_op.name = "NpuOptimizer" custom_op.parameter_map["use_off_line"].b = True custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("force_fp16") config.graph_options.rewrite_options.remapping = RewriterConfig.OFF custom_op.parameter_map["graph_run_mode"].i = 0 self.graph = self.__load_model(model_path) self.input_tensor = self.graph.get_tensor_by_name(input_tensor_name) self.output_tensor = self.graph.get_tensor_by_name(output_tensor_name) # create session self.sess = tf.Session(config=config, graph=self.graph) def __load_model(self, model_file): with tf.gfile.GFile(model_file, "rb") as gf: graph_def = tf.GraphDef() graph_def.ParseFromString(gf.read()) with tf.Graph().as_default() as graph: tf.import_graph_def(graph_def, 
name="") return graph def do_infer(self, batch_data): out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: batch_data}) return out def DAGM2007_Dataset(data_dir, class_id=1, batch_size=1): data_dir = os.path.join(data_dir, "raw_images/private/Class%d" % class_id) csv_file = os.path.join(data_dir, "test_list.csv") image_dir = os.path.join(data_dir, "Test") mask_image_dir = os.path.join(data_dir, "Test/Label") input_shape = mask_shape = [512, 512, 1] shuffle_buffer_size = 10000 def decode_csv(line): input_image_name, image_mask_name, label = tf.decode_csv( line, record_defaults=[[""], [""], [0]], field_delim=',' ) def decode_image(filepath, resize_shape, normalize_data_method): image_content = tf.read_file(filepath) image = tf.image.decode_png(contents=image_content, channels=resize_shape[-1], dtype=tf.uint8) image = tf.image.resize_images( image, size=resize_shape[:2], method=tf.image.ResizeMethod.BILINEAR, # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA] align_corners=False, preserve_aspect_ratio=True ) image.set_shape(resize_shape) image = tf.cast(image, tf.float32) if normalize_data_method == "zero_centered": image = tf.divide(image, 127.5) - 1 elif normalize_data_method == "zero_one": image = tf.divide(image, 255.0) return image input_image = decode_image( filepath=tf.strings.join([image_dir, input_image_name], separator='/'), resize_shape=input_shape, normalize_data_method="zero_centered", ) mask_image = tf.cond( tf.equal(image_mask_name, ""), true_fn=lambda: tf.zeros(mask_shape, dtype=tf.float32), false_fn=lambda: decode_image( filepath=tf.strings.join([mask_image_dir, image_mask_name], separator='/'), resize_shape=mask_shape, normalize_data_method="zero_one", ), ) label = tf.cast(label, tf.int32) return (input_image, mask_image), label dataset = tf.data.TextLineDataset(csv_file) dataset = dataset.skip(1) # Skip CSV Header dataset = dataset.cache() dataset = dataset.apply( tf.data.experimental.map_and_batch( map_func=decode_csv, 
num_parallel_calls=64, batch_size=batch_size, drop_remainder=True, ) ) dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) return dataset def iou_score_fn(y_pred, y_true, threshold, eps=1e-5): y_true = y_true > threshold y_pred = y_pred > threshold y_true = y_true.astype(np.float32) y_pred = y_pred.astype(np.float32) intersection = y_true * y_pred intersection = tf.reduce_sum(intersection, axis=(1, 2, 3)) numerator = 2.0 * intersection + eps divisor = tf.reduce_sum(y_true, axis=(1, 2, 3)) + tf.reduce_sum(y_pred, axis=(1, 2, 3)) + eps return tf.reduce_mean(numerator / divisor) def main(): filepath = sys.argv[1] classifier = Classifier() ds = DAGM2007_Dataset(data_dir=filepath, class_id=1, batch_size=2) iter = ds.make_initializable_iterator() ds_sess = tf.Session() ds_sess.run(iter.initializer) next_element = iter.get_next() eval_metrics = dict() IOU_THS = [[],[],[],[],[],[],[],[]] i = 1 while True: try: # features input = ds_sess.run(next_element) batch_data = input[0] batch_labels = input[1] # input_image, mask_image, labels input_image = batch_data[0] mask_image = batch_data[1] labels = batch_labels y_pred = classifier.do_infer(input_image) labels = tf.cast(labels, tf.float32) labels_preds = tf.reduce_max(y_pred, axis=(1, 2, 3)) j = 0 for threshold in [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]: tf.reset_default_graph() with tf.Session() as eval_sess: iou_score = iou_score_fn(y_pred=y_pred, y_true=mask_image, threshold=threshold) eval_results = eval_sess.run(iou_score) eval_metrics["IoU_THS_%s" % threshold] = tf.metrics.mean(iou_score) IOU_THS[j].append(eval_results) j += 1 i += 1 print("======batch %s finished ======" % str(i)) except tf.errors.OutOfRangeError as e: print("### Total IoU_THS_0.05: ", np.mean(IOU_THS[0])) print("### Total IoU_THS_0.125: ", np.mean(IOU_THS[1])) print("### Total IoU_THS_0.25: ", np.mean(IOU_THS[2])) print("### Total IoU_THS_0.5: ", np.mean(IOU_THS[3])) print("### Total IoU_THS_0.75: ",
np.mean(IOU_THS[4])
numpy.mean
''' Copyright 2020 Xilinx Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' ''' Evaluation of frozen/quantized graph Author: <NAME> ''' import os import sys import argparse import shutil import numpy as np import cv2 from progressbar import ProgressBar # Silence TensorFlow messages os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # workaround for TF1.15 bug "Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR" os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' import tensorflow as tf import tensorflow.contrib.decent_q from tensorflow.python.platform import gfile from preprocess import preprocess DIVIDER = '-----------------------------------------' def graph_eval(input_graph_def, input_node, output_node, dataset, batchsize): images = [] ground_truth = [] for root, dirs, files in os.walk(os.path.join(dataset, 'test')): for filename in files: class_id,_ = filename.split('.', 1) images.append(preprocess(os.path.join(root,filename))) ground_truth.append(class_id) print('Found',len(images),'images and',len(ground_truth),'ground_truth') tf.import_graph_def(input_graph_def,name = '') # Get input placeholders & tensors input_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(input_node+':0') # get output tensors predict = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node+':0') # Create the Computational graph with tf.compat.v1.Session() as sess: predictions = [] progress = ProgressBar() sess.run(tf.compat.v1.initializers.global_variables()) for i in 
progress(range(len(images)//batchsize)): # make batches of images img_batch = images[i*batchsize:i*batchsize+batchsize] # run session to get a batch of predictions feed_dict={input_tensor: img_batch} pred = sess.run([predict], feed_dict) for i in range(len(pred[0])): if
np.argmax(pred[0][i])
numpy.argmax
# Hint install pytest in the google environment, # otherwise it will watch for the google modules # outside of the environment. # Workaround: # python -m pytest test_process_lake.py # https://pypi.org/project/pytest-mock/ # pip install pytest # pip install pytest-mock # pip install pytest-cov import numpy as np import os import pytest import process_lake as pl import databases as db def test_get_measurement_foders(): """Still clumsy check""" lake = os.path.expanduser("~/DataLakeTest") ret = pl.get_measurement_folders(lake) exp = [ "results_0_unpack", "results_1_unpack", "results_2_unpack", "results_4_unpack", "results_5_unpack", # uses collectd ] assert ret == exp def test_get_relevant_measurement_folders_real(): exp = [ "results_1_unpack", "results_2_unpack", "results_4_unpack", "results_5_unpack", ] lake = os.path.expanduser("~/DataLakeTest") valid = "results_1_unpack" ret, valid = pl.get_relevant_measurement_folders(lake, valid) assert ret == exp assert valid == 4 def test_get_relevant_measurement_folders_mocked(mocker): exp = ["results_1_unpack", "results_2_unpack", "results_4_unpack"] lake = os.path.expanduser("~/DataLakeTest") valid = "results_1_unpack" mock = mocker.patch("process_lake.get_measurement_folders") mock.return_value = [ "results_0_unpack", "results_1_unpack", "results_2_unpack", "results_4_unpack", ] ret, valid = pl.get_relevant_measurement_folders(lake, valid) assert ret == exp assert valid == 3 def test_get_relevant_measurement_folders_mocked_5(mocker): exp = [ "results_1_unpack", "results_2_unpack", "results_4_unpack", "results_5_unpack", ] lake = os.path.expanduser("~/DataLakeTest") valid = "results_1_unpack" mock = mocker.patch("process_lake.get_measurement_folders") mock.return_value = [ "results_0_unpack", "results_1_unpack", "results_2_unpack", "results_4_unpack", "results_5_unpack", ] ret, valid = pl.get_relevant_measurement_folders(lake, valid) assert ret == exp assert valid == 4 def 
test_get_relevant_measurement_folders_mocked_1(mocker): exp = ["results_1_unpack"] lake = os.path.expanduser("~/DataLakeTest") valid = "results_1_unpack" mock = mocker.patch("process_lake.get_measurement_folders") mock.return_value = ["results_1_unpack"] ret, valid = pl.get_relevant_measurement_folders(lake, valid) assert ret == exp assert valid == 1 def test_get_relevant_measurement_folders_mocked_0(mocker): lake = os.path.expanduser("~/DataLakeTest") valid = "results_1_unpack" mock = mocker.patch("process_lake.get_measurement_folders") mock.return_value = ["results_0_unpack"] with pytest.raises(SystemError): pl.get_relevant_measurement_folders(lake, valid) def test_generate(mocker): """Integrtion test for generate""" lake = os.path.expanduser("~/DataLakeTest") show = False style = "none" testdata = True foldermock = mocker.patch( "process_lake.get_relevant_measurement_folders", return_value=(3, 1) ) earliest = "results_107_unpack" cputables = 4 memtables = 1 stacktables = 1 metatables = 1 cpumock = mocker.MagicMock(name="cpuobject") mocker.patch("databases.CpuHistory", return_value=cpumock) memmock = mocker.MagicMock(name="memobject") mocker.patch("databases.MemoryHistory", return_value=memmock) stackmock = mocker.MagicMock(name="memobject") mocker.patch("databases.CpuHistoryStacked", return_value=stackmock) metamock = mocker.MagicMock(name="memobject") mocker.patch("databases.MeasurementMetadata", return_value=metamock) pl.generate(style, show, lake, testdata, earliest) foldermock.assert_called_with(lake, earliest) cpumock.update_table.assert_called_with() assert cpumock.update_table.call_count == cputables memmock.update_table.assert_called_with() assert memmock.update_table.call_count == memtables stackmock.update_table.assert_called_with() assert stackmock.update_table.call_count == stacktables metamock.update_table.assert_called_with() assert metamock.update_table.call_count == metatables # we seem to not be able to use ANY here, so assert at least the # 
call count assert cpumock.postprocess.call_count == cputables assert memmock.postprocess.call_count == memtables assert stackmock.postprocess.call_count == stacktables assert metamock.postprocess.call_count == metatables def test_postprocess_vals_cpu(): """This is an integration test! Tighten current functionality for now Probably too much for a simple test """ lake = os.path.expanduser("~/DataLakeTest") relevant_measurement_folders = [ "results_1_unpack", "results_2_unpack", "results_4_unpack", ] data_length = 10 client = None testmode = True cpu_array = db.CpuHistory( lake, "name", len(relevant_measurement_folders), data_length, client, testmode, ) cpu_array.postprocess( relevant_measurement_folders, "publish_sawmill_record_statistics", "stat_mapper_stdout", "tedge_mapper", ) # programmatically reproduce the data set data = [] for i in range(len(relevant_measurement_folders) * data_length): if i < 20: k = (i + 10) // 10 else: k = 4 if i < 18 or i >= 20: ut = i + 1 st = i + 2 else: ut = 0 # hint: missing data here st = 0 data.append([i, k, i % 10, ut, st, 0, 0]) exp = np.array(data, dtype=np.int32) extensive_check = True if extensive_check: print("\nExpect") print(len(exp)) print(exp) print("There") print(cpu_array.size) print(cpu_array.array) for i in range(len(data)): print("Line", i,
np.array_equal(exp[i], cpu_array.array[i])
numpy.array_equal
import json import os import time from abc import ABC import numpy as np import ray import torch from agent0.common.utils import LinearSchedule, set_random_seed from agent0.deepq.actor import Actor from agent0.deepq.agent import Agent from agent0.deepq.config import Config from ray import tune from ray.tune.trial import ExportFormat class Trainer(tune.Trainable, ABC): def __init__(self, config=None, logger_creator=None): self.Rs, self.Qs, self.TRs, self.Ls, self.ITRs, self.velocity = [], [], [], [], [], [] self.cfg = None self.agent = None self.epsilon = None self.epsilon_schedule = None self.actors = None self.frame_count = None self.Rs, self.Qs, self.TRs, self.Ls, self.ITRs = [], [], [], [], [] self.best = float('-inf') self.sample_ops = None super(Trainer, self).__init__(config, logger_creator) def setup(self, config): self.cfg = Config(**config) self.cfg.update_atoms() set_random_seed(self.cfg.random_seed) print("input args:\n", json.dumps(vars(self.cfg), indent=4, separators=(",", ":"))) self.agent = Agent(**config) self.epsilon_schedule = LinearSchedule(1.0, self.cfg.min_eps, self.cfg.exploration_steps) self.actors = [ray.remote(Actor).options(num_gpus=0.1 * self.cfg.gpu_mult).remote(rank=rank, **config) for rank in range(self.cfg.num_actors)] self.frame_count = 0 self.best = float('-inf') self.epsilon = 1.0 self.sample_ops = [a.sample.remote(self.cfg.actor_steps, 1.0, self.agent.model.state_dict()) for a in self.actors] def step(self): fraction_loss = None ce_loss = None tic = time.time() done_id, self.sample_ops = ray.wait(self.sample_ops) data = ray.get(done_id) transitions, rs, qs, rank, fps, best_ep = data[0] # Actors if len(transitions) > 0: self.agent.replay.extend(transitions) if len(best_ep) > 0: self.agent.replay.extend_ep_best(best_ep) self.epsilon = self.epsilon_schedule(self.cfg.actor_steps * self.cfg.num_envs) self.frame_count += self.cfg.actor_steps * self.cfg.num_envs self.sample_ops.append( 
self.actors[rank].sample.remote(self.cfg.actor_steps, self.epsilon, self.agent.model.state_dict())) self.Rs += rs self.Qs += qs # Start training at if len(self.agent.replay) > self.cfg.start_training_step: data = [self.agent.train_step() for _ in range(self.cfg.agent_train_steps)] if self.cfg.algo in ['fqf']: fraction_loss = torch.stack([x['fraction_loss'] for x in data]).mean().item() if self.cfg.best_ep: ce_loss = torch.stack([x['ce_loss'] for x in data]).mean().item() loss = [x['loss'] for x in data] loss = torch.stack(loss) self.Ls += loss.tolist() toc = time.time() self.velocity.append(self.cfg.actor_steps * self.cfg.num_envs / (toc - tic)) result = dict( game=self.cfg.game, time_past=self._time_total, epsilon=self.epsilon, adam_lr=self.cfg.adam_lr, frames=self.frame_count, fraction_loss=fraction_loss if fraction_loss is not None else 0, ce_loss=ce_loss if ce_loss is not None else 0, velocity=np.mean(self.velocity[-20:]) if len(self.velocity) > 0 else 0, speed=self.frame_count / (self._time_total + 1), time_remain=(self.cfg.total_steps - self.frame_count) / ((self.frame_count + 1) / (self._time_total + 1)), loss=np.mean(self.Ls[-20:]) if len(self.Ls) > 0 else 0, ep_reward_test=np.mean(self.ITRs) if len(self.ITRs) > 0 else 0, ep_reward_train=np.mean(self.Rs[-20:]) if len(self.Rs) > 0 else 0, ep_reward_train_max=np.max(self.Rs) if len(self.Rs) > 0 else 0, ep_reward_test_max=np.max(self.TRs) if len(self.TRs) > 0 else 0, qmax=np.mean(self.Qs[-100:]) if len(self.Qs) > 0 else 0 ) return result def save_checkpoint(self, checkpoint_dir): print(f"Iteration {self.training_iteration} testing started") output = ray.get([a.sample.remote(self.cfg.actor_steps, self.cfg.test_eps, self.agent.model.state_dict(), testing=True, test_episodes=self.cfg.test_episode_per_actor) for a in self.actors]) ckpt_rs = [] for _, rs, qs, rank, fps, _ in output: ckpt_rs += rs self.ITRs = ckpt_rs self.TRs += ckpt_rs print(f"Iteration {self.training_iteration} test Result(mean|std|max|min|len):" 
f" {np.mean(ckpt_rs)}\t{
np.std(ckpt_rs)
numpy.std
""" @brief test log(time=120s) """ import unittest import warnings import sys from logging import getLogger from contextlib import redirect_stdout from io import StringIO import numpy import onnx from scipy.sparse import coo_matrix, csr_matrix, SparseEfficiencyWarning from scipy.special import ( # pylint: disable=E0611 expit as logistic_sigmoid, erf) from scipy.spatial.distance import cdist from onnx import TensorProto, __version__ as onnx_version from onnx.helper import make_sparse_tensor, make_tensor from onnx.defs import onnx_opset_version from onnx.numpy_helper import from_array from pyquickhelper.pycode import ExtTestCase from pyquickhelper.texthelper import compare_module_version from sklearn.utils.extmath import softmax try: from sklearn.utils._testing import ignore_warnings except ImportError: from sklearn.utils.testing import ignore_warnings from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611 OnnxAbs, OnnxAdd, OnnxAnd, OnnxArgMax_11, OnnxArgMax, OnnxArgMin_11, OnnxArgMin, OnnxBatchNormalization, OnnxAcos, OnnxAcosh, OnnxAsin, OnnxAsinh, OnnxAtan, OnnxAtanh, OnnxAveragePool, OnnxCast, OnnxCeil, OnnxClip, OnnxCompress, OnnxConcat, OnnxConv, OnnxConvTranspose, OnnxConstant, OnnxConstant_9, OnnxConstant_11, OnnxConstant_12, OnnxConstant_13, OnnxConstantOfShape, OnnxCos, OnnxCosh, OnnxCumSum, OnnxDequantizeLinear, OnnxDet, OnnxDiv, OnnxDropout, OnnxDropout_7, OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike, OnnxFlatten, OnnxFloor, OnnxGreater, OnnxGreaterOrEqual, OnnxGemm, OnnxGlobalAveragePool, OnnxIdentity, OnnxIsNaN, OnnxLess, OnnxLessOrEqual, OnnxLog, OnnxLpNormalization, OnnxMatMul, OnnxMax, OnnxMaxPool, OnnxMean, OnnxMin, OnnxMod, OnnxMul, OnnxNeg, OnnxNot, OnnxOr, OnnxPad, OnnxPow, OnnxQLinearConv, OnnxQuantizeLinear, OnnxRange, OnnxReciprocal, OnnxReduceL1, OnnxReduceL2, OnnxReduceLogSumExp, OnnxReduceMax, OnnxReduceMean, OnnxReduceMin, OnnxReduceProd, OnnxReduceSum, OnnxReduceSumApi11, OnnxReduceSum_11, OnnxReduceSum_1, 
OnnxReduceSumSquare, OnnxRelu, OnnxReshape, OnnxRound, OnnxScatterElements, OnnxShape, OnnxSlice, OnnxSigmoid, OnnxSign, OnnxSin, OnnxSinh, OnnxSize, OnnxSoftmax, OnnxSplit, OnnxSplitApi11, OnnxSqrt, OnnxSub, OnnxSum, OnnxSqueeze, OnnxSqueezeApi11, OnnxTan, OnnxTanh, OnnxTopK, OnnxTranspose, OnnxUnsqueeze, OnnxUnsqueezeApi11 ) try: from skl2onnx.algebra.onnx_ops import OnnxCelu except ImportError: OnnxCelu = None try: from skl2onnx.algebra.onnx_ops import OnnxBatchNormalization_14 except ImportError: OnnxBatchNormalization_14 = None from skl2onnx import __version__ as skl2onnx_version, __max_supported_opset__ from mlprodict.onnxrt import OnnxInference from mlprodict.tools.asv_options_helper import ( get_opset_number_from_onnx, get_ir_version_from_onnx) from mlprodict.onnxrt.validate.validate_python import validate_python_inference from mlprodict.onnxrt.ops_cpu.op_batch_normalization import ( _batchnorm_test_mode, _batchnorm_training_mode) from mlprodict.onnxrt.ops_cpu.op_average_pool import ( _get_output_shape, _pool, _get_pad_shape) from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611,E0401 topk_element_min_double, topk_element_max_double, topk_element_fetch_double, topk_element_min_float, topk_element_max_float, topk_element_fetch_float, topk_element_min_int64, topk_element_max_int64, topk_element_fetch_int64) from mlprodict.onnxrt.ops_cpu.op_celu import _vcelu1, pycelu from mlprodict.onnxrt.ops_cpu.op_topk import topk_sorted_implementation from mlprodict.onnxrt.ops_cpu.op_pad import _pad_impl from mlprodict.onnxrt.ops_cpu.op_max_pool import ( _pool_get_output_shape, _pool_impl) from mlprodict.onnxrt.ops_cpu.op_dropout import _dropout from mlprodict.onnxrt.ops_cpu._op_helper import proto2dtype from mlprodict.onnx_tools.onnx2py_helper import ( guess_proto_dtype, _elem_type_as_str) from mlprodict.tools.data_types import ( FloatTensorType, Int64TensorType, 
DoubleTensorType, StringTensorType, Int32TensorType, BooleanTensorType, UInt8TensorType, Int16TensorType, Int8TensorType, UInt16TensorType, UInt32TensorType, UInt64TensorType, Float16TensorType) from mlprodict.testing.test_utils.quantized_tensor import ( QuantizedTensor, QuantizedBiasTensor, test_qlinear_conv) from mlprodict.onnxrt.ops_cpu.op_qlinear_conv_ import ( # pylint: disable=W0611,E0611,E0401 test_qgemm0, test_qgemm1) from mlprodict.onnxrt.ops_cpu.op_constant import Constant_12, Constant_11, Constant_9 try: numpy_str = numpy.str_ except ImportError: numpy_str = str try: numpy_bool = numpy.bool_ except ImportError: numpy_bool = bool sparse_support = [] sparse_no_numpy = [] python_tested = [] def make_coo_matrix(*args, **kwargs): coo = coo_matrix(*args, **kwargs) coo.row = coo.row.astype(numpy.int64) coo.col = coo.col.astype(numpy.int64) return coo def wraplog(): # from datetime import datetime def wrapper(fct): def call_f(self): # no = datetime.now() # print('BEGIN %s' % fct.__name__) with warnings.catch_warnings(record=True): warnings.simplefilter("always", DeprecationWarning) fct(self) # print('DONE %s - %r' % (fct.__name__, datetime.now() - no)) return call_f return wrapper class TestOnnxrtPythonRuntime(ExtTestCase): # pylint: disable=R0904 @classmethod def setUpClass(cls): pass @classmethod def tearDownClass(cls): if __name__ == "__main__": import pprint print('-----------') pprint.pprint(sparse_support) print('-----------') pprint.pprint(sparse_no_numpy) print('-----------') pprint.pprint( list(sorted({_.__name__ for _ in python_tested}))) print('-----------') def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def test_opset_skl2onnx(self): opset_mlprodict = get_opset_number_from_onnx() opset_skl2onnx = __max_supported_opset__ self.assertGreater(opset_skl2onnx, opset_mlprodict) def common_expected_shapes_types(self, oinf, inputs, got, onnx_cl, model_def, raise_shape=False): expected_types = oinf.infer_types() 
self.assertEqual(set(got) & set(expected_types), set(got)) for k, v in got.items(): if expected_types[k] in (str, numpy.str_): # Type mismatch: dtype('<U32') != <class 'str'> continue if v.dtype != expected_types[k]: raise AssertionError( "Type mismatch: %r != %r\nexpected_types=%r\ngot=%r" "\n----\n%r" % ( v.dtype, expected_types[k], expected_types, got, model_def)) try: expected_shapes = oinf.infer_shapes() self.assertEqual(set(got) & set(expected_shapes), set(got)) except RuntimeError as e: if raise_shape: raise e warnings.warn("infer_shapes fails for operator %r." % onnx_cl) res = oinf.infer_sizes(inputs) self.assertIsInstance(res, dict) @ignore_warnings(category=(RuntimeWarning, DeprecationWarning, SparseEfficiencyWarning, PendingDeprecationWarning)) def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct, op_version=None, outputs=None, debug=False, do_sparse=True, raise_shape=False): if op_version is None: op_version = get_opset_number_from_onnx() try: onx = onnx_cl('X', output_names=['Y'], op_version=op_version) except RuntimeError as e: raise RuntimeError('onnx.opset={} op_version={}'.format( get_opset_number_from_onnx(), op_version)) from e X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64) model_def = onx.to_onnx( {'X': X.astype(numpy.float32)}, target_opset=op_version, outputs=outputs) if debug: print(model_def) python_tested.append(onnx_cl) # python code oinfpy = OnnxInference(model_def, runtime="python", inplace=True) validate_python_inference(oinfpy, {'X': X.astype(numpy.float32)}) # no inplace oinf = OnnxInference(model_def, inplace=False) all_names = "\n".join( "%s>=v%d" % (op.ops_.__class__.__name__, op.ops_._schema.since_version) # pylint: disable=W0212 for op in oinf.sequence_) if debug: got = oinf.run({'X': X.astype(numpy.float32)}, verbose=1, fLOG=print) else: got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.common_expected_shapes_types( oinf, {'X': X.astype(numpy.float32)}, got, onnx_cl, 
model_def, raise_shape=raise_shape) try: self.assertEqualArray(np_fct(X), got['Y'], decimal=5) except AssertionError as e: raise AssertionError( 'onnx.opset={} op_version={}\n--ONNX--\n{}\n--NAMES--\n{}'.format( get_opset_number_from_onnx(), op_version, model_def, all_names)) from e # inplace oinf = OnnxInference(model_def, input_inplace=False, inplace=True) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(np_fct(X), got['Y'], decimal=5) # inplace2 onx2 = OnnxIdentity( onnx_cl('X', op_version=op_version), output_names=['Y'], op_version=op_version) model_def2 = onx2.to_onnx( {'X': X.astype(numpy.float32)}, target_opset=op_version, outputs=outputs) oinf = OnnxInference(model_def2, input_inplace=False, inplace=True) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(np_fct(X), got['Y'], decimal=5) # input inplace expe = np_fct(X) oinf = OnnxInference(model_def, input_inplace=True, inplace=True) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(expe, got['Y'], decimal=5) # sparse if do_sparse: row = numpy.array([0, 0, 1, 3, 1]) col = numpy.array([0, 2, 1, 3, 1]) data = numpy.array([1, 1, 1, 1, 1]) X = make_coo_matrix((data, (row.astype(numpy.int64), col.astype(numpy.int64))), shape=(4, 4), dtype=numpy.float32) try: exp = np_fct(X) except (TypeError, NotImplementedError, ValueError) as e: # Function np_fct does not work on sparse data. 
sparse_no_numpy.append((onnx_cl.__name__, op_version, e)) return model_def_sparse = onx.to_onnx( {'X': X.astype(numpy.float32)}, target_opset=op_version) oinf = OnnxInference( model_def_sparse, input_inplace=False, inplace=True) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualSparseArray(exp, got['Y'], decimal=5) sparse_support.append(('UnOp', op_version, onnx_cl.__name__)) @ignore_warnings(category=(RuntimeWarning, DeprecationWarning, SparseEfficiencyWarning, PendingDeprecationWarning)) def common_test_onnxt_runtime_binary(self, onnx_cl, np_fct, dtype=numpy.float32, op_version=None, debug=False, raise_shape=False): if op_version is None: op_version = get_opset_number_from_onnx() idi = numpy.identity(2, dtype=dtype) onx = onnx_cl('X', idi, output_names=['Y'], op_version=op_version) X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64) model_def = onx.to_onnx({'X': X.astype(dtype)}, target_opset=op_version) oinf = OnnxInference(model_def) if debug: got = oinf.run({'X': X.astype(dtype)}, verbose=1, fLOG=print) else: got = oinf.run({'X': X.astype(dtype)}) self.assertEqual(list(sorted(got)), ['Y']) self.common_expected_shapes_types( oinf, {'X': X.astype(dtype)}, got, onnx_cl, model_def, raise_shape=raise_shape) exp = np_fct(X, idi) self.assertEqualArray(exp, got['Y'], decimal=5) # python code python_tested.append(onnx_cl) oinfpy = OnnxInference(model_def, runtime="python", inplace=True) validate_python_inference(oinfpy, {'X': X.astype(dtype)}) # sparse idi = make_coo_matrix(numpy.identity(2)).astype(numpy.float32) X = make_coo_matrix(numpy.array( [[0, 2], [3, -4]], dtype=numpy.float32)) try: exp = np_fct(X, idi) except (TypeError, NotImplementedError, ValueError) as e: # Function np_fct does not work on sparse data. 
sparse_no_numpy.append((onnx_cl.__name__, op_version, e)) return onx = onnx_cl('X', idi, output_names=['Y'], op_version=op_version) model_def_sparse = onx.to_onnx({'X': X}, target_opset=op_version) try: oinf = OnnxInference( model_def_sparse, input_inplace=False, inplace=True) except RuntimeError as e: raise RuntimeError( "Unable to load sparse model\n{}".format( model_def_sparse)) from e if debug: got = oinf.run({'X': X}, verbose=1, fLOG=print) else: got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) if isinstance(exp, (coo_matrix, csr_matrix)): self.assertEqualSparseArray(exp, got['Y'], decimal=5) elif isinstance(exp, numpy.ndarray): self.assertEqualArray(exp, got['Y'], decimal=5) else: self.assertEqual(exp, got['Y']) sparse_support.append(('BinOp', op_version, onnx_cl.__name__)) @wraplog() def test_onnxt_runtime_abs(self): self.common_test_onnxt_runtime_unary(OnnxAbs, numpy.abs) @wraplog() def test_onnxt_runtime_abs_debug(self): f = StringIO() with redirect_stdout(f): self.common_test_onnxt_runtime_unary( OnnxAbs, numpy.abs, debug=True) @wraplog() def test_onnxt_runtime_acos(self): self.common_test_onnxt_runtime_unary(OnnxAcos, numpy.arccos) @wraplog() def test_onnxt_runtime_acosh(self): self.common_test_onnxt_runtime_unary(OnnxAcosh, numpy.arccosh) @wraplog() def test_onnxt_runtime_add(self): self.common_test_onnxt_runtime_binary(OnnxAdd, numpy.add) @wraplog() def test_onnxt_runtime_and(self): self.common_test_onnxt_runtime_binary( OnnxAnd, numpy.logical_and, dtype=numpy.bool_) @wraplog() def test_onnxt_runtime_argmax(self): opsets = list(range(11, get_opset_number_from_onnx() + 1)) opsets = ['11only'] + opsets for opset in opsets: with self.subTest(opset=opset): X = numpy.array([[2, 1], [0, 1]], dtype=float) if opset == '11only': clarg = OnnxArgMax_11 opset = 11 br = True else: clarg = OnnxArgMax br = False onx = clarg('X', output_names=['Y'], keepdims=0, op_version=opset) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, 
target_opset=opset) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.argmax( X, axis=0), got['Y'], decimal=5) self.common_expected_shapes_types( oinf, {'X': X}, got, clarg, model_def) if br: continue oinfpy = OnnxInference( model_def, runtime="python", inplace=True) validate_python_inference( oinfpy, {'X': X.astype(numpy.float32)}) onx = OnnxArgMax('X', output_names=['Y'], axis=1, keepdims=0, op_version=opset) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, target_opset=opset) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.argmax(X, axis=1).ravel(), got['Y'].ravel()) onx = OnnxArgMax('X', output_names=['Y'], axis=1, keepdims=1, op_version=opset) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, target_opset=opset) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.argmax(X, axis=1).ravel(), got['Y'].ravel()) # sparse X = make_coo_matrix(X, dtype=numpy.float32) try: exp = numpy.argmax(X, axis=1) except (TypeError, NotImplementedError, ValueError) as e: # Function np_fct does not work on sparse data. 
sparse_no_numpy.append((OnnxArgMax.__name__, None, e)) return model_def_sparse = onx.to_onnx({'X': X}, target_opset=opset) oinf = OnnxInference(model_def_sparse, input_inplace=False) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(exp, got['Y'], decimal=5) X = numpy.array([[2, 1], [0, 1]], dtype=float) sparse_support.append(('UnOp', None, OnnxArgMax.__name__)) python_tested.append(OnnxArgMax) @unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0") @wraplog() def test_onnxt_runtime_argmax_12(self): self.assertGreater(onnx_opset_version(), 12) from skl2onnx.algebra.onnx_ops import OnnxArgMax_12 # pylint: disable=E0611 X = numpy.array([[2, 2, 1], [0, 1, 1]], dtype=float) onx = OnnxArgMax_12('X', output_names=['Y'], keepdims=0, axis=1, select_last_index=1, op_version=12) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.array([1, 2], dtype=numpy.int64), got['Y'], decimal=5) self.common_expected_shapes_types( oinf, {'X': X}, got, OnnxArgMax_12, model_def) @wraplog() def test_onnxt_runtime_argmin(self): opsets = list(range(11, get_opset_number_from_onnx() + 1)) opsets = ['11only'] + opsets for opset in opsets: with self.subTest(opset=opset): if opset == '11only': clarg = OnnxArgMin_11 opset = 11 br = True else: clarg = OnnxArgMin br = False X = numpy.array([[2, 1], [0, 1]], dtype=float) onx = clarg('X', output_names=['Y'], keepdims=0, op_version=opset) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, target_opset=opset) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.argmin( X, axis=0), got['Y'], decimal=5) if br: continue oinfpy = OnnxInference( model_def, runtime="python", inplace=True) validate_python_inference( oinfpy, {'X': X.astype(numpy.float32)}) 
                self.common_expected_shapes_types(
                    oinfpy, {'X': X.astype(numpy.float32)}, got, clarg,
                    model_def)
                # axis=1, keepdims=0
                onx = OnnxArgMin('X', output_names=['Y'], axis=1, keepdims=0,
                                 op_version=opset)
                model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
                                        target_opset=opset)
                oinf = OnnxInference(model_def)
                got = oinf.run({'X': X})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(numpy.argmin(X, axis=1).ravel(),
                                      got['Y'].ravel())
                # axis=1, keepdims=1
                onx = OnnxArgMin('X', output_names=['Y'], axis=1, keepdims=1,
                                 op_version=opset)
                model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
                                        target_opset=opset)
                oinf = OnnxInference(model_def)
                got = oinf.run({'X': X})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(numpy.argmin(X, axis=1).ravel(),
                                      got['Y'].ravel())

                # sparse
                X = make_coo_matrix(X, dtype=numpy.float32)
                try:
                    exp = numpy.argmin(X, axis=1)
                except (TypeError, NotImplementedError, ValueError) as e:
                    # Function np_fct does not work on sparse data.
                    sparse_no_numpy.append((OnnxArgMin.__name__, None, e))
                    return
                model_def_sparse = onx.to_onnx({'X': X}, target_opset=opset)
                oinf = OnnxInference(model_def_sparse, input_inplace=False)
                got = oinf.run({'X': X})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(exp, got['Y'], decimal=5)

        sparse_support.append(('UnOp', None, OnnxArgMin.__name__))
        python_tested.append(OnnxArgMin)

    @unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
    @wraplog()
    def test_onnxt_runtime_argmin_12(self):
        """ArgMin opset 12: select_last_index picks the last minimum."""
        self.assertGreater(onnx_opset_version(), 12)
        from skl2onnx.algebra.onnx_ops import OnnxArgMin_12  # pylint: disable=E0611
        X = numpy.array([[2, 1, 1], [0, 0, 1]], dtype=float)
        onx = OnnxArgMin_12('X', output_names=['Y'], keepdims=0, axis=1,
                            select_last_index=1, op_version=12)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        # last index of the min per row: [2,1,1] -> 2, [0,0,1] -> 1
        self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64),
                              got['Y'], decimal=5)
        self.common_expected_shapes_types(
            oinf, {'X': X}, got, OnnxArgMin_12, model_def)

    @wraplog()
    def test_onnxt_runtime_asin(self):
        """Asin operator against numpy.arcsin."""
        self.common_test_onnxt_runtime_unary(OnnxAsin, numpy.arcsin)

    @wraplog()
    def test_onnxt_runtime_asinh(self):
        """Asinh operator against numpy.arcsinh."""
        self.common_test_onnxt_runtime_unary(OnnxAsinh, numpy.arcsinh)

    @wraplog()
    def test_onnxt_runtime_atan(self):
        """Atan operator against numpy.arctan."""
        self.common_test_onnxt_runtime_unary(OnnxAtan, numpy.arctan)

    @wraplog()
    def test_onnxt_runtime_atanh(self):
        """Atanh operator against numpy.arctanh."""
        self.common_test_onnxt_runtime_unary(OnnxAtanh, numpy.arctanh)

    @wraplog()
    def test_onnxt_runtime_atan2(self):
        """Checks the arctan-based reformulation of atan2 against
        numpy.arctan2 on all sign combinations, including zeros."""
        test_pairs = [[y, x] for x in [3., -4., 0., -1., 1.]
                      for y in [5., -6., 0., -1., 1.]]
        y_val = numpy.array([y for y, x in test_pairs], dtype=numpy.float32)
        x_val = numpy.array([x for y, x in test_pairs], dtype=numpy.float32)

        def atan2(y, x):
            # size: 100000
            # timeit arctan: 0.00205
            # timeit arctan2: 0.00361
            # timeit atan2: 0.00599
            sx = numpy.sign(x)
            sy = numpy.sign(y)
            # quadrant correction term (adds +/- pi where x < 0)
            pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi / 2)
            # (1 - sx ** 2) guards against division by zero when x == 0
            atan_part = numpy.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
            return atan_part + pi_part

        self.assertEqualArray(
            numpy.arctan2(y_val, x_val), atan2(y_val, x_val), decimal=5)

    def _expect_average_pool(self, node, inputs, outputs, opset=None):
        """Wraps *node* into a minimal model, runs it and compares the
        single output against outputs[0]."""
        if opset is None:
            opset = get_opset_number_from_onnx()
        ginputs = [
            onnx.helper.make_tensor_value_info(
                node.input[0], TensorProto.FLOAT, []),  # pylint: disable=E1101,
        ]
        goutputs = [
            onnx.helper.make_tensor_value_info(
                node.output[0], TensorProto.FLOAT, []),  # pylint: disable=E1101,
        ]
        model_def = onnx.helper.make_model(
            opset_imports=[onnx.helper.make_operatorsetid('', opset)],
            graph=onnx.helper.make_graph(
                name='test_average_pool', inputs=ginputs, outputs=goutputs,
                nodes=[node]))
        oinf = OnnxInference(model_def)
        got = oinf.run({n: v for n, v in zip(node.input, inputs)})
        self.assertEqual(len(got), 1)
        self.assertEqualArray(outputs[0], got['y'])

    @wraplog()
    def test_onnxt_runtime_average_pool(self):
        """AveragePool against the _pool reference for several paddings,
        strides and dimensions (1d/2d/3d)."""
        # SAME_UPPER auto padding
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[2, 2], auto_pad='SAME_UPPER')
        x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = (2, 2)
        strides = (1, 1)
        out_shape = _get_output_shape(
            'SAME_UPPER', x_shape[2:], kernel_shape, strides)
        pad_shape = _get_pad_shape(
            'SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)
        pad_top = pad_shape[0] // 2
        pad_bottom = pad_shape[0] - pad_top
        pad_left = pad_shape[1] // 2
        pad_right = pad_shape[1] - pad_left
        # NaN padding marks cells excluded from the average
        padded = numpy.pad(
            x, ((0, 0), (0, 0), (pad_top, pad_bottom),
                (pad_left, pad_right)),
            mode='constant', constant_values=numpy.nan)
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            pad_shape, 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # explicit pads with count_include_pad=1 (zeros count in the mean)
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[3, 3], pads=[2, 2, 2, 2], count_include_pad=1)
        x = numpy.random.randn(1, 3, 28, 28).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = (3, 3)
        strides = (1, 1)
        pad_bottom = 2
        pad_top = 2
        pad_right = 2
        pad_left = 2
        pad_shape = [pad_top + pad_bottom, pad_left + pad_right]
        out_shape = _get_output_shape(
            'VALID', numpy.add(x_shape[2:], pad_shape), kernel_shape,
            strides)
        padded = numpy.pad(
            x, ((0, 0), (0, 0), (pad_top, pad_bottom),
                (pad_left, pad_right)),
            mode='constant', constant_values=0)
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            pad_shape, 'AVG', count_include_pad=1)
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # SAME_LOWER auto padding
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[2, 2], auto_pad='SAME_LOWER')
        x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = (2, 2)
        strides = (1, 1)
        out_shape = _get_output_shape(
            'SAME_LOWER', x_shape[2:], kernel_shape, strides)
        pad_shape = _get_pad_shape(
            'SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)
        pad_bottom = pad_shape[0] // 2
        pad_top = pad_shape[0] - pad_bottom
        pad_right = pad_shape[1] // 2
        pad_left = pad_shape[1] - pad_right
        padded = numpy.pad(
            x, ((0, 0), (0, 0), (pad_top, pad_bottom),
                (pad_left, pad_right)),
            mode='constant', constant_values=numpy.nan)
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            pad_shape, 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # explicit pads, padded cells excluded (NaN)
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[3, 3], pads=[2, 2, 2, 2])
        x = numpy.random.randn(1, 3, 28, 28).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = (3, 3)
        strides = (1, 1)
        pad_bottom = 2
        pad_top = 2
        pad_right = 2
        pad_left = 2
        pad_shape = [pad_top + pad_bottom, pad_left + pad_right]
        out_shape = _get_output_shape(
            'VALID', numpy.add(x_shape[2:], pad_shape), kernel_shape,
            strides)
        padded = numpy.pad(
            x, ((0, 0), (0, 0), (pad_top, pad_bottom),
                (pad_left, pad_right)),
            mode='constant', constant_values=numpy.nan)
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            pad_shape, 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # 1d pooling
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[2])
        x = numpy.random.randn(1, 3, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = [2]
        strides = [1]
        out_shape = _get_output_shape(
            'VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = _pool(padded, x_shape, kernel_shape,
                  strides, out_shape, [0], 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # 2d pooling, no padding
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[2, 2])
        x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = (2, 2)
        strides = (1, 1)
        out_shape = _get_output_shape(
            'VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            (0, 0), 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # 2d pooling with strides
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[5, 5], strides=[3, 3])
        x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = (5, 5)
        strides = (3, 3)
        out_shape = _get_output_shape(
            'VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            (0, 0), 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])

        # 3d pooling
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[2, 2, 2])
        x = numpy.random.randn(1, 3, 32, 32, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
        kernel_shape = [2, 2, 2]
        strides = [1, 1, 1]
        out_shape = _get_output_shape(
            'VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = _pool(
            padded, x_shape, kernel_shape, strides, out_shape,
            [0, 0, 0], 'AVG')
        self._expect_average_pool(node, inputs=[x], outputs=[y])
        python_tested.append(OnnxAveragePool)

    @wraplog()
    @unittest.skipIf(True, "not implemented yet")
    def test_onnxt_runtime_average_pool_ceil(self):
        """AveragePool with ceil_mode (runtime support pending)."""
        node = onnx.helper.make_node(
            'AveragePool', inputs=['x'], outputs=['y'],
            kernel_shape=[3, 3], strides=[2, 2], ceil_mode=True)
        x = numpy.array([[[
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16]]]]).astype(numpy.float32)
        y = numpy.array([[[
            [6, 7.5],
            [12, 13.5]]]]).astype(numpy.float32)
        self._expect_average_pool(node, inputs=[x], outputs=[y])

    @wraplog()
    def test_onnxt_runtime_average_pool_big(self):
        """AveragePool against precomputed expected outputs taken from the
        ONNX backend test suite."""
        with self.subTest(name='test_averagepool_2d_precomputed_pads'):
            node = onnx.helper.make_node(
                'AveragePool', inputs=['x'], outputs=['y'],
                kernel_shape=[5, 5], pads=[2, 2, 2, 2])
            x = numpy.array([[[
                [1, 2, 3, 4, 5],
                [6, 7, 8, 9, 10],
                [11, 12, 13, 14, 15],
                [16, 17, 18, 19, 20],
                [21, 22, 23, 24, 25]]]]).astype(numpy.float32)
            y = numpy.array([[[[7, 7.5, 8, 8.5, 9],
                               [9.5, 10, 10.5, 11, 11.5],
                               [12, 12.5, 13, 13.5, 14],
                               [14.5, 15, 15.5, 16, 16.5],
                               [17, 17.5, 18, 18.5, 19]]]]).astype(numpy.float32)
            self._expect_average_pool(node, inputs=[x], outputs=[y])

        with self.subTest(name='test_averagepool_2d_precomputed_pads_count_include_pad'):
            node = onnx.helper.make_node(
                'AveragePool', inputs=['x'], outputs=['y'],
                kernel_shape=[5, 5], pads=[2, 2, 2, 2],
                count_include_pad=1)
            x = numpy.array([[[
                [1, 2, 3, 4, 5],
                [6, 7, 8, 9, 10],
                [11, 12, 13, 14, 15],
                [16, 17, 18, 19, 20],
                [21, 22, 23, 24, 25]]]]).astype(numpy.float32)
            y = numpy.array([[[[2.5200, 3.6000, 4.8000, 4.0800, 3.2400],
                               [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],
                               [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],
                               [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],
                               [6.1200, 8.4000, 10.8000, 8.8800, 6.8400]]]]).astype(numpy.float32)
            self._expect_average_pool(node, inputs=[x], outputs=[y])

        with self.subTest(name='test_averagepool_2d_precomputed_same_upper'):
            node = onnx.helper.make_node(
                'AveragePool', inputs=['x'], outputs=['y'],
                kernel_shape=[3, 3], strides=[2, 2], auto_pad='SAME_UPPER')
            x = numpy.array([[[
                [1, 2, 3, 4, 5],
                [6, 7, 8, 9, 10],
                [11, 12, 13, 14, 15],
                [16, 17, 18, 19, 20],
                [21, 22, 23, 24, 25]]]]).astype(numpy.float32)
            y = numpy.array([[[[4, 5.5, 7],
                               [11.5, 13, 14.5],
                               [19, 20.5, 22]]]]).astype(numpy.float32)
            self._expect_average_pool(node, inputs=[x], outputs=[y])

        with self.subTest(name='test_averagepool_2d_precomputed_strides'):
            node = onnx.helper.make_node(
                'AveragePool', inputs=['x'], outputs=['y'],
                kernel_shape=[2, 2], strides=[2, 2])
            x = numpy.array([[[
                [1, 2, 3, 4, 5],
                [6, 7, 8, 9, 10],
                [11, 12, 13, 14, 15],
                [16, 17, 18, 19, 20],
                [21, 22, 23, 24, 25]]]]).astype(numpy.float32)
            y = numpy.array([[[[4, 6],
                               [14, 16]]]]).astype(numpy.float32)
            self._expect_average_pool(node, inputs=[x], outputs=[y])

    @wraplog()
    def test_onnxt_runtime_batch_normalization(self):
        """BatchNormalization (inference mode) against the
        _batchnorm_test_mode reference."""
        # input size: (1, 2, 1, 3)
        x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
        s = numpy.array([1.0, 1.5]).astype(numpy.float32)
        bias = numpy.array([0, 1]).astype(numpy.float32)
        mean = numpy.array([0, 3]).astype(numpy.float32)
        var = numpy.array([1, 1.5]).astype(numpy.float32)
        y = _batchnorm_test_mode(x, s, bias, mean, var).astype(numpy.float32)

        onx = OnnxBatchNormalization(
            'X', s, bias, mean, var, output_names=['Y'],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(y, got['Y'])
        self.common_expected_shapes_types(
            oinf, {'X': x}, got, OnnxBatchNormalization, model_def)

        # input size: (2, 3, 4, 5)
        x = numpy.random.randn(2, 3, 4, 5).astype(numpy.float32)
        s = numpy.random.randn(3).astype(numpy.float32)
        bias = numpy.random.randn(3).astype(numpy.float32)
        mean = numpy.random.randn(3).astype(numpy.float32)
        var = numpy.random.rand(3).astype(numpy.float32)
        epsilon = 1e-2
        y = _batchnorm_test_mode(
            x, s, bias, mean, var, epsilon).astype(numpy.float32)

        onx = OnnxBatchNormalization(
            'X', s, bias, mean, var, output_names=['Y'],
            epsilon=epsilon,
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(y, got['Y'])
        python_tested.append(OnnxBatchNormalization)

    @wraplog()
    def test_onnxt_runtime_batch_normalization_training_fct(self):
        """Checks the _batchnorm_training_mode helper on fixed values."""
        x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
        s = numpy.array([1.0, 1.5]).astype(numpy.float32)
        bias = numpy.array([0, 1]).astype(numpy.float32)
        mean = numpy.array([0, 3]).astype(numpy.float32)
        var = numpy.array([1, 1.5]).astype(numpy.float32)
        y, scale, bias, mean, var = (
            _batchnorm_training_mode(x, s, bias, mean, var))
        self.assertEqualArray(
            numpy.array([[[[-1.2247356, 0., 1.2247356]],
                          [[-0.8371035, 1., 2.8371034]]]],
                        dtype=numpy.float32), y)
        self.assertEqualArray(
            numpy.array([0., 3.], dtype=numpy.float32), scale)
        self.assertEqualArray(
            numpy.array([0.6666667, 0.6666667], dtype=numpy.float32),
            bias)
        self.assertEqualArray(
            numpy.array([0., 2.9999998], dtype=numpy.float32),
            mean)
        self.assertEqualArray(
            numpy.array([0.96666664, 1.4166666], dtype=numpy.float32),
            var)

    @wraplog()
    @unittest.skipIf(OnnxBatchNormalization_14 is None,
                     reason="onnx too old")
    def test_onnxt_runtime_batch_normalization_training(self):
        """BatchNormalization with training_mode=1 (opset 14): checks the
        extra scale/bias/mean outputs returned by the runtime."""
        # input size: (1, 2, 1, 3)
        x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
        s = numpy.array([1.0, 1.5]).astype(numpy.float32)
        bias = numpy.array([0, 1]).astype(numpy.float32)
        mean = numpy.array([0, 3]).astype(numpy.float32)
        var = numpy.array([1, 1.5]).astype(numpy.float32)
        y, scale, bias, mean, var = (
            _batchnorm_training_mode(x, s, bias, mean, var))

        onx = OnnxBatchNormalization_14(
            'X', s, bias, mean, var,
            output_names=['Y', 'scale', 'bias', 'mean', 'var'],
            training_mode=1, op_version=14)
        try:
            model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                    target_opset=14)
        except RuntimeError as e:
            # older onnx releases cannot infer shapes for this graph
            if "Shape inference fails" in str(e):
                warnings.warn(str(e))
                return
            raise e
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x})
        self.assertEqual(
            list(sorted(got)), ['Y', 'bias', 'mean', 'scale', 'var'])
        self.assertEqualArray(scale, got['scale'])
        self.assertEqualArray(bias, got['bias'])
        self.assertEqualArray(mean, got['mean'])
        # self.assertEqualArray(var, got['var'])
        # self.assertEqualArray(y, got['Y'])
        self.assertNotEmpty(y)
        self.assertNotEmpty(var)

    @wraplog()
    def test_onnxt_runtime_cast_out(self):
        """Cast from float32 to every supported target element type and
        checks proto <-> numpy dtype helpers."""
        x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
            numpy.float32)  # pylint: disable=E1101
        # (onnx proto type, numpy dtype, skl2onnx tensor type)
        dest = [(TensorProto.FLOAT, numpy.float32, FloatTensorType),  # pylint: disable=E1101
                (TensorProto.DOUBLE, numpy.float64,  # pylint: disable=E1101
                 DoubleTensorType),  # pylint: disable=E1101
                (TensorProto.INT32, numpy.int32,  # pylint: disable=E1101
                 Int32TensorType),  # pylint: disable=E1101
                (TensorProto.INT64, numpy.int64,  # pylint: disable=E1101
                 Int64TensorType),  # pylint: disable=E1101
                (TensorProto.INT8, numpy.int8,  # pylint: disable=E1101
                 Int8TensorType),  # pylint: disable=E1101
                (TensorProto.INT16, numpy.int16,  # pylint: disable=E1101
                 Int16TensorType),  # pylint: disable=E1101
                (TensorProto.UINT8, numpy.uint8,  # pylint: disable=E1101
                 UInt8TensorType),  # pylint: disable=E1101
                (TensorProto.UINT32, numpy.uint32,  # pylint: disable=E1101
                 UInt32TensorType),  # pylint: disable=E1101
                (TensorProto.UINT16, numpy.uint16,  # pylint: disable=E1101
                 UInt16TensorType),  # pylint: disable=E1101
                (TensorProto.UINT64, numpy.uint64,  # pylint: disable=E1101
                 UInt64TensorType),  # pylint: disable=E1101
                (TensorProto.FLOAT16, numpy.float16,  # pylint: disable=E1101
                 Float16TensorType),  # pylint: disable=E1101
                (TensorProto.BOOL, numpy.bool_,  # pylint: disable=E1101
                 BooleanTensorType),  # pylint: disable=E1101
                (TensorProto.STRING, numpy.str_, StringTensorType),
                ]  # pylint: disable=E1101
        for opset in range(9, get_opset_number_from_onnx() + 1):
            for to, nptp, outp in dest:
                # sanity checks on the dtype conversion helpers
                if nptp == numpy.bool_:
                    self.assertIn(proto2dtype(to), (nptp, bool))
                elif nptp == numpy.str_:
                    self.assertIn(proto2dtype(to), (nptp, str))
                else:
                    self.assertEqual(proto2dtype(to), nptp)
                self.assertEqual(to, guess_proto_dtype(nptp))
                self.assertNotEmpty(_elem_type_as_str(to))
                with self.subTest(opset=opset, to=to):
                    onx = OnnxCast('X', to=to, output_names=['Y'],
                                   op_version=opset)
                    model_def = onx.to_onnx(
                        {'X': x}, outputs=[('Y', outp())],
                        target_opset=opset)
                    oinf = OnnxInference(model_def)
                    got = oinf.run({'X': x})
                    if nptp == numpy.str_:
                        # string arrays compared element-wise as lists
                        self.assertEqual(
                            x.astype(nptp).tolist(), got['Y'].tolist())
                    else:
                        self.assertEqualArray(x.astype(nptp), got['Y'])
                    self.common_expected_shapes_types(
                        oinf, {'X': x}, got, OnnxCast, model_def)
        python_tested.append(OnnxCast)

    @wraplog()
    def test_onnxt_runtime_cast_in(self):
        """Cast from every supported element type into STRING."""
        x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
            numpy.float32)  # pylint: disable=E1101
        # (onnx proto type, numpy dtype, skl2onnx tensor type)
        dest = [(TensorProto.FLOAT, numpy.float32, FloatTensorType),  # pylint: disable=E1101
                (TensorProto.DOUBLE, numpy.float64,  # pylint: disable=E1101
                 DoubleTensorType),  # pylint: disable=E1101
                (TensorProto.INT32, numpy.int32,  # pylint: disable=E1101
                 Int32TensorType),  # pylint: disable=E1101
                (TensorProto.INT64, numpy.int64,  # pylint: disable=E1101
                 Int64TensorType),  # pylint: disable=E1101
                (TensorProto.INT8, numpy.int8,  # pylint: disable=E1101
                 Int8TensorType),  # pylint: disable=E1101
                (TensorProto.INT16, numpy.int16,  # pylint: disable=E1101
                 Int16TensorType),  # pylint: disable=E1101
                (TensorProto.UINT8, numpy.uint8,  # pylint: disable=E1101
                 UInt8TensorType),  # pylint: disable=E1101
                (TensorProto.UINT32, numpy.uint32,  # pylint: disable=E1101
                 UInt32TensorType),  # pylint: disable=E1101
                (TensorProto.UINT16, numpy.uint16,  # pylint: disable=E1101
                 UInt16TensorType),  # pylint: disable=E1101
                (TensorProto.UINT64, numpy.uint64,  # pylint: disable=E1101
                 UInt64TensorType),  # pylint: disable=E1101
                (TensorProto.FLOAT16, numpy.float16,  # pylint: disable=E1101
                 Float16TensorType),  # pylint: disable=E1101
                (TensorProto.BOOL, numpy.bool_,  # pylint: disable=E1101
                 BooleanTensorType),  # pylint: disable=E1101
                (TensorProto.STRING, numpy.str_, StringTensorType),
                ]  # pylint: disable=E1101
        for opset in range(9, get_opset_number_from_onnx() + 1):
            for to, nptp, _ in dest:
                if nptp == numpy.bool_:
                    self.assertIn(proto2dtype(to), (nptp, bool))
                elif nptp == numpy.str_:
                    self.assertIn(proto2dtype(to), (nptp, str))
                else:
                    self.assertEqual(proto2dtype(to), nptp)
                self.assertEqual(to, guess_proto_dtype(nptp))
                self.assertNotEmpty(_elem_type_as_str(to))
                with self.subTest(opset=opset, to=to):
                    xi = x.astype(nptp)
                    onx = OnnxCast('X', to=TensorProto.STRING,  # pylint: disable=E1101
                                   output_names=['Y'], op_version=opset)
                    model_def = onx.to_onnx(
                        {'X': xi}, outputs=[('Y', StringTensorType())],
                        target_opset=opset)
                    got = OnnxInference(model_def).run({'X': xi})
                    self.assertEqual(
                        xi.astype(str).tolist(), got['Y'].tolist())
        python_tested.append(OnnxCast)

    @wraplog()
    def test_onnxt_runtime_ceil(self):
        """Ceil operator against numpy.ceil."""
        self.common_test_onnxt_runtime_unary(OnnxCeil, numpy.ceil)

    @unittest.skipIf(OnnxCelu is None, reason="onnx too recent")
    @wraplog()
    def test_onnxt_runtime_celu1(self):
        """Celu operator against the vectorized _vcelu1 reference."""
        self.common_test_onnxt_runtime_unary(
            OnnxCelu, _vcelu1, op_version=12,
            outputs=[('Y', FloatTensorType([None, 2]))])

    @unittest.skipIf(OnnxCelu is None, reason="onnx too recent")
    @wraplog()
    def test_onnxt_runtime_celu2(self):
        """Celu operator against a locally vectorized pycelu."""
        _vcelu2 = numpy.vectorize(
            lambda x: pycelu(x, 1.), otypes=[numpy.float])
        self.common_test_onnxt_runtime_unary(
            OnnxCelu, _vcelu2, op_version=12,
            outputs=[('Y', FloatTensorType([None, 2]))])

    @unittest.skipIf(onnx_opset_version() < 11,
                     reason="Explicitely tests Clip >= 11")
    @wraplog()
    def test_onnxt_runtime_clip(self):
        """Clip (opset >= 11, bounds as inputs) against numpy.clip."""
        # min only
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=None: OnnxClip(
                x, numpy.array([0], dtype=numpy.float32),
                output_names=output_names, op_version=op_version),
            lambda x: numpy.clip(x, 0, 1e5))
        # min and max, max active
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=None: OnnxClip(
                x, numpy.array([-1000], dtype=numpy.float32),
                numpy.array([0], dtype=numpy.float32),
                op_version=op_version,
                output_names=output_names),
            lambda x: numpy.clip(x, -1e5, 0))
        # both bounds active
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=None: OnnxClip(
                x, numpy.array([0.1], dtype=numpy.float32),
                numpy.array([2.1], dtype=numpy.float32),
                output_names=output_names,
                op_version=op_version),
            lambda x: numpy.clip(x, 0.1, 2.1))
        python_tested.append(OnnxClip)

    @wraplog()
    def test_onnxt_runtime_compress(self):
        """Compress operator against numpy.compress."""
        # axis is None
        x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(numpy.float32)
        x = x.reshape((-1, 2))
        cond = numpy.array([False, True, False])

        onx = OnnxCompress('X', 'cond', output_names=['Y'],
                           op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x, 'cond': cond},
                                outputs=[('Y', FloatTensorType())],
                                target_opset=get_opset_number_from_onnx())
        exp = numpy.compress(cond, x)
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x, 'cond': cond})
        self.assertEqualArray(exp, got['Y'])
        self.common_expected_shapes_types(
            oinf, {'X': x, 'cond': cond}, got, OnnxCompress,
            model_def)
        python_tested.append(OnnxCompress)

    @wraplog()
    def test_onnxt_runtime_clip_10(self):
        """Clip at opset 10 (bounds as attributes) against numpy.clip."""
        from skl2onnx.algebra.onnx_ops import OnnxClip_6  # pylint: disable=E0611
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=10: OnnxClip_6(
                x, min=1e-5, max=1e5,
                output_names=output_names, op_version=10),
            lambda x: numpy.clip(x, 1e-5, 1e5),
            op_version=10)
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=10: OnnxClip(
                x, min=1e-5, max=1e5,
                output_names=output_names, op_version=10),
            lambda x: numpy.clip(x, 1e-5, 1e5),
            op_version=10)
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=10: OnnxClip(
                x, max=1e-5, output_names=output_names, op_version=10),
            lambda x: numpy.clip(x, -1e5, 1e-5),
            op_version=10)
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None, op_version=10: OnnxClip(
                x, min=0.1, max=2.1,
                output_names=output_names, op_version=10),
            lambda x: numpy.clip(x, 0.1, 2.1),
            op_version=10)

    @wraplog()
    def test_onnxt_runtime_concat(self):
        """Concat of two inputs and one initializer against numpy.vstack."""
        cst = numpy.array([[1, 2]], dtype=numpy.float32)
        onx = OnnxConcat('X', 'Y', cst, output_names=['Z'],
                         op_version=get_opset_number_from_onnx())
        X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float64)
        Y = numpy.array([[8, 9], [10, 11], [12, 13]], dtype=numpy.float64)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32),
                                 'Y': Y.astype(numpy.float32)},
                                outputs=[('Z', FloatTensorType([2]))],
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X.astype(numpy.float32),
                        'Y': Y.astype(numpy.float32)})
        self.assertEqual(list(sorted(got)), ['Z'])
        self.assertEqual(got['Z'].shape, (6, 2))
        exp = numpy.vstack([X, Y, cst])
        self.assertEqualArray(exp, got['Z'])
        self.common_expected_shapes_types(
            oinf, {'X': X.astype(numpy.float32),
                   'Y': Y.astype(numpy.float32)},
            got, OnnxConcat, model_def)
        python_tested.append(OnnxConstantOfShape)
        oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
        validate_python_inference(
            oinfpy, {'X': X.astype(numpy.float32),
                     'Y': Y.astype(numpy.float32)})
        python_tested.append(OnnxConcat)

    @wraplog()
    def test_onnxt_runtime_constant_of_shape(self):
        """ConstantOfShape producing a zero-filled float32 tensor."""
        x = numpy.array([2, 2], dtype=numpy.int64)
        y = numpy.zeros((2, 2), dtype=numpy.float32)
        onx = OnnxConstantOfShape('X', output_names=['Y'],
                                  op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.int64)},
                                outputs=[('Y', FloatTensorType())],
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x.astype(numpy.int64)})
        self.assertEqualArray(y, got['Y'])
        self.common_expected_shapes_types(
            oinf, {'X': x.astype(numpy.int64)}, got, OnnxConstantOfShape,
            model_def)
        python_tested.append(OnnxConstantOfShape)
        oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
        validate_python_inference(oinfpy, {'X': x})

    @wraplog()
    def test_onnxt_runtime_conv0(self):
        """Conv with precomputed outputs: padded, unpadded and
        SAME_LOWER+strides cases, on both runtimes."""
        x = numpy.array([[[[0., 1., 2., 3., 4.],  # (1, 1, 5, 5) input tensor
                           [5., 6., 7., 8., 9.],
                           [10., 11., 12., 13., 14.],
                           [15., 16., 17., 18., 19.],
                           [20., 21., 22., 23., 24.]]]]).astype(numpy.float32)
        W = numpy.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                           [1., 1., 1.],
                           [1., 1., 1.]]]]).astype(numpy.float32)

        # test 1
        y_with_padding = numpy.array([[[[12., 21., 27., 33., 24.],  # (1, 1, 5, 5) output tensor
                                        [33., 54., 63., 72., 51.],
                                        [63., 99., 108., 117., 81.],
                                        [93., 144., 153., 162., 111.],
                                        [72., 111., 117., 123., 84.]]]]).astype(numpy.float32)
        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], pads=[1, 1, 1, 1],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                got = oinf.run({'X': x})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(y_with_padding, got['Y'])

        # test 2
        y_without_padding = numpy.array([[[[54., 63., 72.],  # (1, 1, 3, 3) output tensor
                                           [99., 108., 117.],
                                           [144., 153., 162.]]]]).astype(numpy.float32)
        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], pads=[0, 0, 0, 0],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                got = oinf.run({'X': x})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(y_without_padding, got['Y'])
                # shape/type introspection only works on the python runtime
                if rt == 'python':
                    self.common_expected_shapes_types(
                        oinf, {'X': x}, got, OnnxConv, model_def)
                else:
                    self.assertRaise(
                        lambda: self.common_expected_shapes_types(
                            oinf, {'X': x}, got, OnnxConv, model_def),
                        RuntimeError)

        # test 3
        y = numpy.array([[[[12., 27., 24.],
                           [63., 108., 81.],
                           [72., 117., 84.]]]]).astype(numpy.float32)
        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], auto_pad='SAME_LOWER', strides=[2, 2],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                got = oinf.run({'X': x})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(y, got['Y'])
        python_tested.append(OnnxConv)

    @wraplog()
    def test_onnxt_runtime_conv1(self):
        """Conv with strides: symmetric, no and asymmetric padding."""
        x = numpy.array([[[[0., 1., 2., 3., 4.],
                           [5., 6., 7., 8., 9.],
                           [10., 11., 12., 13., 14.],
                           [15., 16., 17., 18., 19.],
                           [20., 21., 22., 23., 24.],
                           [25., 26., 27., 28., 29.],
                           [30., 31., 32., 33., 34.]]]]).astype(numpy.float32)
        W = numpy.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                           [1., 1., 1.],
                           [1., 1., 1.]]]]).astype(numpy.float32)

        # test 1
        y_with_padding = numpy.array([[[[12., 27., 24.],  # (1, 1, 4, 3) output tensor
                                        [63., 108., 81.],
                                        [123., 198., 141.],
                                        [112., 177., 124.]]]]).astype(numpy.float32)
        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                got = oinf.run({'X': x})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(y_with_padding, got['Y'])

        # test 2
        y_without_padding = numpy.array([[[[54., 72.],  # (1, 1, 3, 2) output tensor
                                           [144., 162.],
                                           [234., 252.]]]]).astype(numpy.float32)
        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], pads=[0, 0, 0, 0], strides=[2, 2],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                got = oinf.run({'X': x})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(y_without_padding, got['Y'])

        # test 3
        y_with_asymmetric_padding = numpy.array([[[[21., 33.],  # (1, 1, 4, 2) output tensor
                                                   [99., 117.],
                                                   [189., 207.],
                                                   [171., 183.]]]]).astype(numpy.float32)
        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], pads=[1, 0, 1, 0], strides=[2, 2],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                got = oinf.run({'X': x})
                self.assertEqual(list(sorted(got)), ['Y'])
                self.assertEqualArray(y_with_asymmetric_padding, got['Y'])

    @wraplog()
    def test_onnxt_runtime_conv2_B(self):
        """Conv with a bias input: python vs onnxruntime agreement."""
        x = numpy.random.rand(1, 3, 5, 4).astype(numpy.float32)
        W = numpy.random.rand(4, 3, 3, 3).astype(numpy.float32)
        B = numpy.array([100, 700, 1000, 7000], dtype=numpy.float32)
        onx = OnnxConv(
            'X', 'W', 'B', output_names=['Y'],
            kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x, 'W': W, 'B': B},
                                target_opset=get_opset_number_from_onnx())
        ys = []
        for rt in ['python', 'onnxruntime1']:
            oinf = OnnxInference(model_def, runtime=rt)
            got = oinf.run({'X': x, 'W': W, 'B': B})
            ys.append(got['Y'])
        self.assertEqualArray(ys[0], ys[1], decimal=4)

    @wraplog()
    def test_onnxt_runtime_conv_transpose(self):
        """ConvTranspose with a precomputed expected output."""
        x = numpy.array([[[[0., 1., 2.],  # (1, 1, 3, 3)
                           [3., 4., 5.],
                           [6., 7., 8.]]]]).astype(numpy.float32)
        W = numpy.array([[[[1., 1., 1.],  # (1, 2, 3, 3)
                           [1., 1., 1.],
                           [1., 1., 1.]],
                          [[1., 1., 1.],
                           [1., 1., 1.],
                           [1., 1., 1.]]]]).astype(numpy.float32)
        y_with_padding = numpy.array([[[[0., 1., 3., 3., 2.],  # (1, 2, 5, 5)
                                        [3., 8., 15., 12., 7.],
                                        [9., 21., 36., 27., 15.],
                                        [9., 20., 33., 24., 13.],
                                        [6., 13., 21., 15., 8.]],
                                       [[0., 1., 3., 3., 2.],
                                        [3., 8., 15., 12., 7.],
                                        [9., 21., 36., 27., 15.],
                                        [9., 20., 33., 24., 13.],
                                        [6., 13., 21., 15., 8.]]]]).astype(numpy.float32)
        onx = OnnxConvTranspose(
            'X', W, output_names=['Y'],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(y_with_padding, got['Y'])
        python_tested.append(OnnxConv)

    @wraplog()
    def test_onnxt_runtime_conv_transpose_B(self):
        """ConvTranspose with a bias input runs on both runtimes (outputs
        are not compared, see the commented assertion)."""
        x = numpy.random.rand(1, 3, 5, 4).astype(numpy.float32)
        W = numpy.random.rand(3, 4, 3, 3).astype(numpy.float32)
        B = numpy.array([100, 700, 1000, 7000], dtype=numpy.float32)
        onx = OnnxConvTranspose(
            'X', 'W', 'B', output_names=['Y'],
            kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x, 'W': W, 'B': B},
                                target_opset=get_opset_number_from_onnx())
        ys = []
        for rt in ['python', 'onnxruntime1']:
            oinf = OnnxInference(model_def, runtime=rt)
            got = oinf.run({'X': x, 'W': W, 'B': B})
            ys.append(got['Y'])
        self.assertEqual(len(ys), 2)
        # self.assertEqualArray(ys[0], ys[1])

    @wraplog()
    def test_onnxt_runtime_conv_transpose_1d(self):
        """1d ConvTranspose on both runtimes with a precomputed output."""
        x = numpy.array([[[0., 1., 2.]]]).astype(numpy.float32)
        W = numpy.array([[[1., 1., 1.],  # (1, 2, 3)
                          [1., 1., 1.]]]).astype(numpy.float32)
        y_with_padding = numpy.array(
            [[[0., 1., 3., 3., 2.],  # (1, 2, 5)
              [0., 1., 3., 3., 2.]]]).astype(numpy.float32)
        onx = OnnxConvTranspose(
            'X', W, output_names=['Y'],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def, runtime="onnxruntime1")
        got = oinf.run({'X': x})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(y_with_padding, got['Y'])

        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(y_with_padding, got['Y'])
        python_tested.append(OnnxConvTranspose)

    @wraplog()
    def test_onnxt_runtime_conv_transpose_3d(self):
        """3d ConvTranspose with a precomputed output (continues below)."""
        x = numpy.arange(60).reshape((1, 1, 3, 4, 5)).astype(numpy.float32)
        W = numpy.ones((1, 2, 3, 3, 3)).astype(numpy.float32)
        y_with_padding = numpy.array(
            [[[[[0., 1., 3., 6., 9., 7., 4.],  # (1, 2, 5, 6, 7)
                [5., 12., 21., 27., 33., 24., 13.],
                [15., 33., 54., 63., 72., 51., 27.],
                [30., 63., 99., 108., 117., 81., 42.],
                [25., 52., 81., 87., 93., 64., 33.],
                [15., 31., 48., 51., 54., 37., 19.]],
               [[20., 42., 66., 72., 78., 54., 28.],
                [50., 104., 162., 174., 186., 128., 66.],
                [90., 186., 288., 306., 324., 222., 114.],
                [120., 246., 378., 396., 414., 282., 144.],
                [90., 184., 282., 294., 306., 208., 106.],
                [50., 102., 156., 162., 168., 114., 58.]],
               [[60., 123., 189., 198., 207., 141., 72.],
                [135., 276., 423., 441., 459., 312., 159.],
                [225., 459., 702., 729., 756., 513., 261.],
                [270., 549., 837., 864., 891., 603., 306.],
                [195., 396., 603., 621., 639., 432., 219.],
                [105., 213., 324., 333., 342., 231., 117.]],
               [[60., 122., 186., 192., 198., 134., 68.],
                [130., 264., 402., 414., 426., 288., 146.],
                [210., 426., 648., 666., 684., 462., 234.],
[240., 486., 738., 756., 774., 522., 264.], [170., 344., 522., 534., 546., 368., 186.], [90., 182., 276., 282., 288., 194., 98.]], [[40., 81., 123., 126., 129., 87., 44.], [85., 172., 261., 267., 273., 184., 93.], [135., 273., 414., 423., 432., 291., 147.], [150., 303., 459., 468., 477., 321., 162.], [105., 212., 321., 327., 333., 224., 113.], [55., 111., 168., 171., 174., 117., 59.]]], [[[0., 1., 3., 6., 9., 7., 4.], [5., 12., 21., 27., 33., 24., 13.], [15., 33., 54., 63., 72., 51., 27.], [30., 63., 99., 108., 117., 81., 42.], [25., 52., 81., 87., 93., 64., 33.], [15., 31., 48., 51., 54., 37., 19.]], [[20., 42., 66., 72., 78., 54., 28.], [50., 104., 162., 174., 186., 128., 66.], [90., 186., 288., 306., 324., 222., 114.], [120., 246., 378., 396., 414., 282., 144.], [90., 184., 282., 294., 306., 208., 106.], [50., 102., 156., 162., 168., 114., 58.]], [[60., 123., 189., 198., 207., 141., 72.], [135., 276., 423., 441., 459., 312., 159.], [225., 459., 702., 729., 756., 513., 261.], [270., 549., 837., 864., 891., 603., 306.], [195., 396., 603., 621., 639., 432., 219.], [105., 213., 324., 333., 342., 231., 117.]], [[60., 122., 186., 192., 198., 134., 68.], [130., 264., 402., 414., 426., 288., 146.], [210., 426., 648., 666., 684., 462., 234.], [240., 486., 738., 756., 774., 522., 264.], [170., 344., 522., 534., 546., 368., 186.], [90., 182., 276., 282., 288., 194., 98.]], [[40., 81., 123., 126., 129., 87., 44.], [85., 172., 261., 267., 273., 184., 93.], [135., 273., 414., 423., 432., 291., 147.], [150., 303., 459., 468., 477., 321., 162.], [105., 212., 321., 327., 333., 224., 113.], [55., 111., 168., 171., 174., 117., 59.]]]]]).astype(numpy.float32) onx = OnnxConvTranspose( 'X', W, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) 
self.assertEqualArray(y_with_padding, got['Y']) @unittest.skipIf(True, reason="fails with output_shape") @wraplog() def test_onnxt_runtime_conv_transpose_output_shape(self): x = numpy.arange(9).reshape((1, 1, 3, 3)).astype(numpy.float32) W = numpy.ones((1, 2, 3, 3)).astype(numpy.float32) y_with_padding = numpy.array( [[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8) [0., 0., 1., 1., 3., 2., 2., 0.], [0., 0., 1., 1., 3., 2., 2., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [0., 0., 0., 0., 0., 0., 0., 0.]], [[0., 0., 1., 1., 3., 2., 2., 0.], [0., 0., 1., 1., 3., 2., 2., 0.], [0., 0., 1., 1., 3., 2., 2., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(numpy.float32) with self.subTest(part="output_shape"): onx = OnnxConvTranspose( 'X', W, output_names=['Y'], strides=[3, 2], output_shape=[10, 8], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def, runtime="onnxruntime1") got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y_with_padding, got['Y']) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y_with_padding, got['Y']) @wraplog() def test_onnxt_runtime_conv_transpose_attributes(self): x = numpy.arange(9).reshape((1, 1, 3, 3)).astype(numpy.float32) W = numpy.ones((1, 2, 3, 3)).astype(numpy.float32) y_with_padding = numpy.array( [[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8) [0., 0., 1., 1., 3., 2., 2., 0.], [0., 0., 1., 1., 3., 2., 2., 0.], [3., 3., 7., 4., 
9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [0., 0., 0., 0., 0., 0., 0., 0.]], [[0., 0., 1., 1., 3., 2., 2., 0.], [0., 0., 1., 1., 3., 2., 2., 0.], [0., 0., 1., 1., 3., 2., 2., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [3., 3., 7., 4., 9., 5., 5., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [6., 6., 13., 7., 15., 8., 8., 0.], [0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(numpy.float32) with self.subTest(part="output_padding"): onx = OnnxConvTranspose( 'X', W, output_names=['Y'], strides=[3, 2], output_padding=[1, 1], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y_with_padding, got['Y']) with self.subTest(part="kernel_shape"): onx = OnnxConvTranspose( 'X', W, output_names=['Y'], strides=[3, 2], output_shape=[10, 8], kernel_shape=[3, 3], output_padding=[1, 1], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y_with_padding, got['Y']) @wraplog() def test_onnxt_runtime_conv_transpose_dilation(self): x = numpy.array([[[[3., 8., 1.], # (1, 1, 3, 3) [9., 5., 7.], [3., 2., 6.]]]]).astype(numpy.float32) W = numpy.array([[[[7., 2.], # (1, 1, 2, 2) [1., 9.]]]]).astype(numpy.float32) y_with_padding = numpy.array( [[[[21., 56., 13., 16., 2.], # [1, 1, 5, 5] [63., 35., 67., 10., 14.], [24., 22., 76., 76., 21.], [9., 5., 88., 45., 63.], [3., 2., 33., 18., 54.]]]]).astype(numpy.float32) onx = OnnxConvTranspose( 'X', W, output_names=['Y'], dilations=[2, 
2], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y_with_padding, got['Y']) @wraplog() def test_onnxt_runtime_conv_transpose_pads(self): x = numpy.arange(9).reshape((1, 1, 3, 3)).astype(numpy.float32) W = numpy.ones((1, 2, 3, 3)).astype(numpy.float32) y_with_padding = numpy.array( [[[[1., 1., 3.], # (1, 2, 7, 3) [1., 1., 3.], [7., 4., 9.], [7., 4., 9.], [7., 4., 9.], [13., 7., 15.], [13., 7., 15.]], [[1., 1., 3.], [1., 1., 3.], [7., 4., 9.], [7., 4., 9.], [7., 4., 9.], [13., 7., 15.], [13., 7., 15.]]]]).astype(numpy.float32) onx = OnnxConvTranspose( 'X', W, output_names=['Y'], strides=[3, 2], pads=[1, 2, 1, 2], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y_with_padding, got['Y']) @wraplog() def test_onnxt_runtime_cos(self): self.common_test_onnxt_runtime_unary(OnnxCos, numpy.cos) @wraplog() def test_onnxt_runtime_cosh(self): self.common_test_onnxt_runtime_unary(OnnxCosh, numpy.cosh) @wraplog() def test_onnxt_runtime_cum_sum(self): x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64) axis = numpy.array([0]).astype(numpy.int32) exp = numpy.array([1., 3., 6., 10., 15.]).astype(numpy.float64) onx = OnnxCumSum('X', 'axis', output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x, 'axis': axis}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x.astype(numpy.float64), 'axis': axis}) self.assertEqualArray(exp, got['Y']) self.common_expected_shapes_types( oinf, {'X': x.astype(numpy.float64), 'axis': axis}, got, OnnxCumSum, 
model_def) python_tested.append(OnnxCumSum) oinfpy = OnnxInference(model_def, runtime="python", inplace=True) validate_python_inference(oinfpy, {'X': x, 'axis': axis}) # reverse = 1 x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64) axis = numpy.array([0]).astype(numpy.int32) exp = numpy.array([15., 14., 12., 9., 5.]).astype(numpy.float64) onx = OnnxCumSum('X', 'axis', output_names=['Y'], reverse=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x, 'axis': axis}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) try: got = OnnxInference(model_def).run({'X': x, 'axis': axis}) self.assertEqualArray(exp, got['Y']) except NotImplementedError: pass # exclusive = 1 x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64) axis = numpy.array([0]).astype(numpy.int32) exp = numpy.array([0., 1., 3., 6., 10.]).astype(numpy.float64) onx = OnnxCumSum('X', 'axis', output_names=['Y'], exclusive=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x, 'axis': axis}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) try: got = OnnxInference(model_def).run({'X': x, 'axis': axis}) self.assertEqualArray(exp, got['Y']) except NotImplementedError: pass # 2d axis = 0 x = numpy.array([1., 2., 3., 4., 5., 6.]).astype( numpy.float64).reshape((2, 3)) axis = numpy.array([0]).astype(numpy.int32) exp = numpy.array([1., 2., 3., 5., 7., 9.]).astype( numpy.float64).reshape((2, 3)) onx = OnnxCumSum('X', 'axis', output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x, 'axis': axis}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) got = OnnxInference(model_def).run({'X': x, 'axis': axis}) self.assertEqualArray(exp, got['Y']) # 2d axis = 1 x = numpy.array([1., 2., 3., 4., 5., 6.]).astype( numpy.float64).reshape((2, 3)) axis = numpy.array([-1]).astype(numpy.int32) exp = numpy.array([1., 3., 6., 4., 9., 15.]).astype( 
numpy.float64).reshape((2, 3)) onx = OnnxCumSum('X', 'axis', output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x, 'axis': axis}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) got = OnnxInference(model_def).run({'X': x, 'axis': axis}) self.assertEqualArray(exp, got['Y']) # 2d axis = 1, reverse x = numpy.array([1., 2., 3., 4., 5., 6.]).astype( numpy.float64).reshape((2, 3)) axis = numpy.array([-1]).astype(numpy.int32) exp = numpy.array([1., 3., 6., 4., 9., 15.]).astype( numpy.float64).reshape((2, 3)) onx = OnnxCumSum('X', 'axis', output_names=['Y'], reverse=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x, 'axis': axis}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) try: got = OnnxInference(model_def).run({'X': x, 'axis': axis}) self.assertEqualArray(exp, got['Y']) except NotImplementedError: pass # no axis x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64) axis = numpy.array([0]).astype(numpy.int32) exp = numpy.array([1., 3., 6., 10., 15.]).astype(numpy.float64) try: onx = OnnxCumSum('X', output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx( {'X': x}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) got = OnnxInference(model_def).run({'X': x}) self.assertEqualArray(exp, got['Y']) except RuntimeError: pass # reverse = 1 x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64) axis = numpy.array([0]).astype(numpy.int32) exp = numpy.array([15., 14., 12., 9., 5.]).astype(numpy.float64) try: onx = OnnxCumSum('X', output_names=['Y'], reverse=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx( {'X': x}, outputs=[('Y', DoubleTensorType())], target_opset=get_opset_number_from_onnx()) got = OnnxInference(model_def).run({'X': x}) self.assertEqualArray(exp, got['Y']) except RuntimeError: pass @wraplog() def test_onnxt_runtime_det(self): 
self.common_test_onnxt_runtime_unary( OnnxDet, lambda x: numpy.array([numpy.linalg.det(x)]), do_sparse=False) @wraplog() def test_onnxt_runtime_dequantize_linear(self): X = numpy.array([[[[3, 89], [34, 200], [74, 59]], [[5, 24], [24, 87], [32, 13]], [[245, 99], [4, 142], [121, 102]], ], ], dtype=numpy.uint8) x_scale = numpy.array([2, 4, 5], dtype=numpy.float32) x_zero_point = numpy.array([84, 24, 196], dtype=numpy.uint8) exp = ((X.astype(numpy.float32) - x_zero_point.reshape( (1, 3, 1, 1)).astype(numpy.float32)) * x_scale.reshape((1, 3, 1, 1))) onx = OnnxDequantizeLinear( 'X', x_scale, x_zero_point, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqualArray(exp, got['Y']) self.common_expected_shapes_types( oinf, {'X': X}, got, OnnxDequantizeLinear, model_def) X = numpy.array([0, 3, 128, 255]).astype(numpy.uint8) x_scale = numpy.array([2], dtype=numpy.float32) x_zero_point = numpy.array([128], dtype=numpy.uint8) exp = numpy.array([-256, -250, 0, 254], dtype=numpy.float32) onx = OnnxDequantizeLinear( 'X', x_scale, x_zero_point, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqualArray(exp, got['Y']) python_tested.append(OnnxDequantizeLinear) @wraplog() def test_onnxt_runtime_div(self): self.common_test_onnxt_runtime_binary(OnnxDiv, lambda x, y: x / y) @wraplog() def test_onnxt_runtime_dropout_10(self): seed = numpy.int64(0) X = numpy.random.randn(3, 4, 5).astype(numpy.float32) onx = OnnxDropout_7('X', output_names=['Y'], op_version=10) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, outputs=[('Y', FloatTensorType())], target_opset=10) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) 
self.assertEqual(list(sorted(got)), ['Y']) self.assertEqual(got['Y'].shape, X.shape) self.assertEqualArray(got['Y'], _dropout(X, seed=seed)[0]) self.common_expected_shapes_types( oinf, {'X': X}, got, OnnxDropout_7, model_def) python_tested.append(OnnxDropout) @wraplog() def test_onnxt_runtime_dropout(self): seed = numpy.int64(0) X = numpy.random.randn(3, 4, 5).astype(numpy.float32) onx = OnnxDropout('X', output_names=['Y'], seed=seed, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, outputs=[('Y', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqual(got['Y'].shape, X.shape) self.assertEqualArray(got['Y'], _dropout(X, seed=seed)[0]) self.common_expected_shapes_types( oinf, {'X': X}, got, OnnxDropout, model_def) onx = OnnxDropout('X', output_names=['Y', 'Z'], seed=seed, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32)}, outputs=[('Y', FloatTensorType()), ('Z', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y', 'Z']) self.assertEqual(got['Y'].shape, X.shape) res = _dropout(X, seed=seed, return_mask=True) self.assertEqualArray(got['Y'], res[0]) self.assertEqualArray(got['Z'], res[1]) R = numpy.array([0.1], dtype=numpy.float32) onx = OnnxDropout('X', 'R', output_names=['Y'], seed=seed, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32), 'R': R.astype(numpy.float32)}, outputs=[('Y', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X, 'R': R}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqual(got['Y'].shape, X.shape) self.assertEqualArray( got['Y'], _dropout(X, seed=seed, drop_probability=0.1)[0]) R = numpy.array([0.75], 
dtype=numpy.float32) B = numpy.array([True]) onx = OnnxDropout('X', 'R', 'B', output_names=['Y'], seed=seed, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32), 'R': R, 'B': B}, outputs=[('Y', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X, 'R': R, 'B': B}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqual(got['Y'].shape, X.shape) self.assertEqualArray( got['Y'], _dropout(X, seed=seed, drop_probability=0.75, training_mode=True)[0]) python_tested.append(OnnxDropout) @wraplog() def test_onnxt_runtime_einsum(self): X = numpy.random.randn(5, 2, 3).astype(numpy.float32) Y = numpy.random.randn(5, 3, 4).astype(numpy.float32) equation = 'bij,bjk->bik' onx = OnnxEinsum( 'X', 'Y', equation=equation, output_names=['Z'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': X.astype(numpy.float32), 'Y': Y.astype(numpy.float32)}, outputs=[('Z', FloatTensorType([2]))], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X, 'Y': Y}) exp = numpy.einsum(equation, X, Y) self.assertEqualArray(exp, got['Z']) self.common_expected_shapes_types( oinf, {'X': X, 'Y': Y}, got, OnnxEinsum, model_def) python_tested.append(OnnxEinsum) oinfpy = OnnxInference(model_def, runtime="python", inplace=True) validate_python_inference(oinfpy, {'X': X.astype(numpy.float32), 'Y': Y.astype(numpy.float32)}) @wraplog() def test_onnxt_runtime_eyelike(self): onx = OnnxEyeLike('X', k=0, output_names=['Y']) X = numpy.array([2, 2], dtype=numpy.int64) model_def = onx.to_onnx({'X': X.astype(numpy.int64)}, target_opset=get_opset_number_from_onnx(), outputs=[('Y', FloatTensorType())]) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) self.assertEqual(list(sorted(got)), ['Y']) exp = numpy.eye(*X, k=0) self.assertEqualArray(exp, got['Y']) self.common_expected_shapes_types( oinf, {'X': X}, got, OnnxEyeLike, model_def) oinfpy = 
OnnxInference(model_def, runtime="python") validate_python_inference(oinfpy, {'X': X.astype(numpy.int64)}) python_tested.append(OnnxEyeLike) @wraplog() def test_onnxt_runtime_equal(self): self.common_test_onnxt_runtime_binary(OnnxEqual, numpy.equal) @wraplog() def test_onnxt_runtime_erf(self): self.common_test_onnxt_runtime_unary(OnnxErf, erf) @wraplog() def test_onnxt_runtime_exp(self): self.common_test_onnxt_runtime_unary(OnnxExp, numpy.exp) @wraplog() def test_onnxt_runtime_flatten(self): shape = (2, 3, 4, 5) x = numpy.random.random_sample(shape).astype( # pylint: disable=E1101 numpy.float32) # pylint: disable=E1101 for i in range(len(shape)): node = OnnxFlatten('X', axis=i, output_names='Y', op_version=get_opset_number_from_onnx()) model_def = node.to_onnx( {'X': x}, outputs=[('Y', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) new_shape = ((1, -1) if i == 0 else (numpy.prod(shape[0:i]).astype(int), -1)) exp = numpy.reshape(x, new_shape) self.assertEqualArray(exp, got['Y']) self.common_expected_shapes_types( oinf, {'X': x}, got, OnnxFlatten, model_def) python_tested.append(OnnxFlatten) oinfpy = OnnxInference(model_def, runtime="python", inplace=True) validate_python_inference(oinfpy, {'X': x}) @wraplog() def test_onnxt_runtime_floor(self): self.common_test_onnxt_runtime_unary(OnnxFloor, numpy.floor) @wraplog() def test_onnxt_runtime_gather_elements0(self): from skl2onnx.algebra.onnx_ops import OnnxGatherElements # pylint: disable=E0611 # ex 1 data = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32) indices = numpy.array([], dtype=numpy.int64) onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': data, 'Y': indices}, outputs=[('Z', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': data, 'Y': indices}) self.assertEqual(got['Z'].size, 0) 
self.common_expected_shapes_types( oinf, {'X': data, 'Y': indices}, got, OnnxGatherElements, model_def) @wraplog() def test_onnxt_runtime_gather_elements0_fortran(self): from skl2onnx.algebra.onnx_ops import OnnxGatherElements # pylint: disable=E0611 # ex 1 data = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32, order='F') indices = numpy.array([], dtype=numpy.int64, order='F') onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': data, 'Y': indices}, outputs=[('Z', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': data, 'Y': indices}) self.assertEqual(got['Z'].size, 0) @wraplog() def test_onnxt_runtime_gather_elements(self): from skl2onnx.algebra.onnx_ops import OnnxGatherElements # pylint: disable=E0611 # ex 1 data = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32) indices = numpy.array([[0, 0], [1, 0]], dtype=numpy.int64) onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=1, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': data, 'Y': indices}, outputs=[('Z', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': data, 'Y': indices}) exp = numpy.array([[1, 1], [4, 3]], dtype=numpy.float32) self.assertEqual(exp, got['Z']) python_tested.append(OnnxGatherElements) oinfpy = OnnxInference(model_def, runtime="python", inplace=True) validate_python_inference(oinfpy, {'X': data, 'Y': indices}) # ex 2 data = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=numpy.float32) indices = numpy.array([[1, 2, 0], [2, 0, 0]], dtype=numpy.int32) onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=0, op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': data, 'Y': indices}, outputs=[('Z', FloatTensorType())], target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': data, 'Y': 
indices}) exp = numpy.array([[4, 8, 3], [7, 2, 3]], dtype=numpy.float32) self.assertEqual(exp, got['Z']) @wraplog() def test_onnxt_runtime_gemm_python(self): self.do_test_onnxt_runtime_gemm("python") python_tested.append(OnnxGemm) @wraplog() def test_onnxt_runtime_gemm_onnxruntime(self): self.do_test_onnxt_runtime_gemm("onnxruntime1") def do_test_onnxt_runtime_gemm(self, runtime): idi = numpy.array([[1, 0], [1, 1]], dtype=numpy.float32) cst = numpy.array([4, 5], dtype=numpy.float32) X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32) onx = OnnxGemm('X', idi, cst, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': idi.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) if 'onnxruntime' in runtime: model_def.ir_version = get_ir_version_from_onnx() try: oinf = OnnxInference(model_def, runtime=runtime) except RuntimeError as e: raise RuntimeError( "Unable to instantiate (runtime='{}')\n{}".format( runtime, model_def)) from e got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.dot(X, idi) + cst, got['Y'], decimal=5) onx = OnnxGemm('X', idi, cst, transA=1, transB=1, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': idi.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) if 'onnxruntime' in runtime: model_def.ir_version = get_ir_version_from_onnx() try: oinf = OnnxInference(model_def, runtime=runtime) except RuntimeError as e: raise RuntimeError( "Unable to instantiate (runtime='{}')\n{}".format( runtime, model_def)) from e got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.dot(X.T, idi.T) + cst, got['Y'], decimal=5) onx = OnnxGemm('X', idi, cst, transA=1, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': idi.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) 
model_def.ir_version = get_ir_version_from_onnx() oinf = OnnxInference(model_def, runtime=runtime) got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.dot(X.T, idi) + cst, got['Y'], decimal=5) onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': idi.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) if 'onnxruntime' in runtime: model_def.ir_version = get_ir_version_from_onnx() oinf = OnnxInference(model_def, runtime=runtime) got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.dot(X, idi.T) + cst, got['Y'], decimal=5) onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'], alpha=numpy.float32(1.), op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': idi.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) if 'onnxruntime' in runtime: model_def.ir_version = get_ir_version_from_onnx() oinf = OnnxInference(model_def, runtime=runtime) got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.dot(X, idi.T) + cst, got['Y'], decimal=5) if runtime != 'onnxruntime1': onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'], alpha=numpy.float32(1.), op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': idi.astype(numpy.float64)}, target_opset=get_opset_number_from_onnx()) if 'onnxruntime' in runtime: model_def.ir_version = get_ir_version_from_onnx() oinf = OnnxInference(model_def, runtime=runtime) got = oinf.run({'X': X.astype(numpy.float32)}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(numpy.dot(X, idi.T) + cst, got['Y'], decimal=5) @wraplog() def test_onnxt_runtime_global_average_pool(self): x = x = numpy.random.randn(1, 3, 5, 5).astype(numpy.float32) y = _global_average_pool(x).astype(numpy.float32) onx = 
OnnxGlobalAveragePool( 'X', output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y, got['Y']) self.common_expected_shapes_types( oinf, {'X': x}, got, OnnxGlobalAveragePool, model_def) x = numpy.array([[[ [1, 2, 3], [4, 5, 6], [7, 8, 9], ]]]).astype(numpy.float32) y = numpy.array([[[[5]]]]).astype(numpy.float32) onx = OnnxGlobalAveragePool( 'X', output_names=['Y'], op_version=get_opset_number_from_onnx()) model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': x}) self.assertEqual(list(sorted(got)), ['Y']) self.assertEqualArray(y, got['Y']) python_tested.append(OnnxGlobalAveragePool) def test_onnxt_runtime_greater(self): self.common_test_onnxt_runtime_binary(OnnxGreater, numpy.greater) @wraplog() def test_onnxt_runtime_greater_or_equal(self): self.common_test_onnxt_runtime_binary( OnnxGreaterOrEqual, numpy.greater_equal) @wraplog() def test_onnxt_runtime_identity(self): self.common_test_onnxt_runtime_unary(OnnxIdentity, lambda x: x) @wraplog() def test_onnxt_runtime_isnan(self): self.common_test_onnxt_runtime_unary(OnnxIsNaN, numpy.isnan) @wraplog() def test_onnxt_runtime_less(self): self.common_test_onnxt_runtime_binary(OnnxLess, numpy.less) @wraplog() def test_onnxt_runtime_less_or_equal(self): self.common_test_onnxt_runtime_binary( OnnxLessOrEqual, numpy.less_equal) @wraplog() def test_onnxt_runtime_log(self): self.common_test_onnxt_runtime_unary(OnnxLog, numpy.log) @wraplog() def test_onnxt_runtime_lp_normalization(self): onx = OnnxLpNormalization('X', output_names=['Y'], p=2, axis=1, op_version=get_opset_number_from_onnx()) X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float32) model_def = onx.to_onnx({'X': X}, 
target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) exp = numpy.array([[0.4472136, 0.8944272], [0.6, -0.8]], dtype=numpy.float32) self.assertEqualArray(got['Y'], exp) self.common_expected_shapes_types( oinf, {'X': X}, got, OnnxLpNormalization, model_def) onx = OnnxLpNormalization('X', output_names=['Y'], p=2, axis=0, op_version=get_opset_number_from_onnx()) X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float32) model_def = onx.to_onnx({'X': X}, target_opset=get_opset_number_from_onnx()) oinf = OnnxInference(model_def) got = oinf.run({'X': X}) exp = numpy.array([[0.3162278, 0.4472136], [0.9486833, -0.8944272]], dtype=numpy.float32) self.assertEqualArray(got['Y'], exp) python_tested.append(OnnxLpNormalization) @wraplog() def test_onnxt_runtime_matmul(self): self.common_test_onnxt_runtime_binary(OnnxMatMul, lambda x, y: x @ y) @wraplog() def test_onnxt_runtime_max(self): self.common_test_onnxt_runtime_binary( OnnxMax, lambda x, y: numpy.maximum(x, y)) @wraplog() def test_onnxt_runtime_max_pool_1d_default(self): X =
numpy.random.randn(1, 3, 32)
numpy.random.randn
import numpy as np


def make_input(file_list):
    """Build graph-network inputs (atom/bond/global features and index
    arrays) from a list of structure-file paths, plus the target energies.

    NOTE(review): whitespace was restored from a collapsed source; the loop
    nesting below is inferred from the data flow -- verify against the
    original file.
    """
    atom_fea_list = []
    bond_fea_list = []
    glob_fea_list = []
    bond_idx_atom1_list = []
    bond_idx_atom2_list = []
    atom_idx_list = []
    bond_idx_list = []
    E_list = []
    Ga_idx_list = []
    As_idx_list = []
    num_lattice = 9  # header lines preceding the atom-coordinate block
    stacked_atom_len = 0
    for f in range(0, len(file_list)):
        # [:-1] strips the trailing newline from the path read from a list file.
        infile2 = open(file_list[f][:-1], 'r')
        data_list = infile2.readlines()
        infile2.close()
        ##### define the basic information of structure #####
        cell = data_list[4:7]
        atoms = data_list[num_lattice:]
        cell_list = []
        for i in range(len(cell)):
            cell_list.append(cell[i].split()[0:3])
        cell_matrix = np.asfarray(cell_list)
        atom_list = []
        for i in range(len(atoms)):
            atom_list.append(atoms[i].split()[1:4])
        atom_matrix = np.asfarray(atom_list)
        ##### define the feature and index of bonds #####
        if f == 0:
            # First structure: discover bonds (atom pairs within 3.0 under the
            # minimum-image convention over the 27 neighboring cells) and
            # remember the edge count, reused for all later structures.
            for i in range(len(atoms)):
                for j in range(len(atoms)):
                    d_min = sum((atom_matrix[i, :] - atom_matrix[j, :])**2)**(1/2)
                    for l1 in [-1, 0, 1]:
                        for l2 in [-1, 0, 1]:
                            for l3 in [-1, 0, 1]:
                                d = sum((atom_matrix[i, :] - atom_matrix[j, :]
                                         + l1*cell_matrix[0, :]
                                         + l2*cell_matrix[1, :]
                                         + l3*cell_matrix[2, :])**2)**(1/2)
                                d_min = min([d_min, d])
                    if d_min <= 3.0 and i != j:
                        bond_fea_list.append([1.0/d_min])  # inverse distance feature
                        bond_idx_atom1_list.append(i + len(atom_idx_list))
                        bond_idx_atom2_list.append(j + len(atom_idx_list))
                        bond_idx_list.append(f)
            num_edge = len(bond_idx_list)
        else:
            # Later structures: reuse the bond topology of the first file,
            # recomputing only the minimum-image distances.
            for i in range(num_edge):
                d_min = sum((atom_matrix[bond_idx_atom1_list[i], :]
                             - atom_matrix[bond_idx_atom2_list[i], :])**2)**(1/2)
                for l1 in [-1, 0, 1]:
                    for l2 in [-1, 0, 1]:
                        for l3 in [-1, 0, 1]:
                            d = sum((atom_matrix[bond_idx_atom1_list[i], :]
                                     - atom_matrix[bond_idx_atom2_list[i], :]
                                     + l1*cell_matrix[0, :]
                                     + l2*cell_matrix[1, :]
                                     + l3*cell_matrix[2, :])**2)**(1/2)
                            d_min = min([d_min, d])
                bond_fea_list.append([1.0/d_min])
                bond_idx_atom1_list.append(bond_idx_atom1_list[i] + len(atom_idx_list))
                bond_idx_atom2_list.append(bond_idx_atom2_list[i] + len(atom_idx_list))
                bond_idx_list.append(f)
        ##### define the feature and index of atoms #####
        for i in range(len(atoms)):
            atom_idx_list.append(f)
            if 'Ga' in atoms[i].split():
                atom_fea_list.append([0., 0., 1., 0., 0., 0., 0., 0.])  # one-hot Ga
                Ga_idx_list.append(stacked_atom_len + i)
            elif 'As' in atoms[i].split():
                atom_fea_list.append([0., 0., 0., 0., 1., 0., 0., 0.])  # one-hot As
                As_idx_list.append(stacked_atom_len + i)
        ####### define the global feature and E #######
        glob_fea_list.append(np.empty([0]))  # no global features: empty vector
        # Energy target parsed from the 5th token of the file's first line.
        E_list.append([float(data_list[0].split()[4])])
        stacked_atom_len = stacked_atom_len + len(atoms)
    Ga_grp_list = list(np.array(atom_idx_list)[Ga_idx_list])
    As_grp_list = list(np.array(atom_idx_list)[As_idx_list])
    return [atom_fea_list, bond_fea_list, glob_fea_list,
            bond_idx_atom1_list, bond_idx_atom2_list,
            atom_idx_list, bond_idx_list,
            Ga_idx_list, As_idx_list, Ga_grp_list, As_grp_list], E_list


def standarization(E_list):
    """Standardize the energy targets; return (standardized list, mean, std).

    Uses the sample standard deviation (divides by num_data - 1).
    """
    data_E_matrix = np.asfarray(E_list)
    num_data = data_E_matrix.shape[0]  # num of data
    data_E_mean = data_E_matrix.mean()
    data_E_std = 0
    for i in range(num_data):
        data_E_std += (data_E_matrix[i]-data_E_mean)**2
    data_E_std = (data_E_std/(num_data-1))**0.5
    data_stE_matrix = (data_E_matrix-data_E_mean)/data_E_std  # standarized target E
    return list(data_stE_matrix), data_E_mean, data_E_std


def concat_bulk_surf(bulk_inputs, bulk_E, surf_inputs, surf_E, batch_size, E_mean, E_std):
    """Concatenate bulk and surface graph inputs into one batch.

    Index arrays of the surface inputs are shifted by the bulk atom count /
    batch size so they remain valid after concatenation.
    NOTE(review): bulk_E, surf_E, E_mean and E_std are unused in the visible
    part -- presumably consumed by the truncated remainder; confirm upstream.
    """
    surf_inputs[3] = list(np.array(surf_inputs[3]) + len(bulk_inputs[5]))
    surf_inputs[4] = list(np.array(surf_inputs[4]) + len(bulk_inputs[5]))
    surf_inputs[5] = list(np.array(surf_inputs[5]) + batch_size)
    surf_inputs[6] = list(np.array(surf_inputs[6]) + batch_size)
    surf_inputs[7] = list(np.array(surf_inputs[7]) + len(bulk_inputs[5]))
    surf_inputs[8] = list(np.array(surf_inputs[8]) + len(bulk_inputs[5]))
    surf_inputs[9] = list(np.array(surf_inputs[9]) + batch_size)
    surf_inputs[10] = list(np.array(surf_inputs[10]) + batch_size)
    atom_fea = np.asfarray(bulk_inputs[0] + surf_inputs[0])
    atom_fea = np.expand_dims(atom_fea, axis=0)
    bond_fea = np.asfarray(bulk_inputs[1] + surf_inputs[1])
    bond_fea = np.expand_dims(bond_fea, axis=0)
    glob_fea = np.asfarray(bulk_inputs[2] + surf_inputs[2])
    glob_fea = np.expand_dims(glob_fea, axis=0)
    bond_idx_atom1 = np.asarray(bulk_inputs[3] + surf_inputs[3])
    bond_idx_atom1 = np.expand_dims(bond_idx_atom1, axis=0)
    bond_idx_atom2 = np.asarray(bulk_inputs[4] + surf_inputs[4])
    # NOTE(review): the function is truncated at this point in this chunk;
    # the trailing bare name below is truncation residue from the extraction.
    numpy.asarray
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from scipy import stats
import pandas as pd
###############################################################################
###############################################################################


def reg_corr_plot():
    """Plot eight simulated linear models on a 2x4 grid.

    Each panel scatters data simulated from Y = beta1 + beta2*X +
    error_scale*u (u ~ N(0, 1)), overlays the true regression line and
    annotates the sample correlation between X and Y.  Varies the slope
    (top row) and the noise scale (bottom row).
    """
    class LinearRegression:
        """Simulate one linear model and report the x/y correlation."""

        def __init__(self, beta1, beta2, error_scale, data_size):
            self.beta1 = beta1
            self.beta2 = beta2
            self.error_scale = error_scale
            # x is drawn uniformly from the integers [1, data_size).
            self.x = np.random.randint(1, data_size, data_size)
            self.y = (self.beta1 + self.beta2 * self.x
                      + self.error_scale * np.random.randn(data_size))

        def x_y_cor(self):
            # Pearson correlation coefficient between the simulated x and y.
            return np.corrcoef(self.x, self.y)[0, 1]

    fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(24, 12))
    # (beta1, beta2, error_scale, data_size) per panel, row-major order.
    configs = [
        (2, .05, 1, 100), (2, -.6, 1, 100), (2, 1, 1, 100), (2, 3, 1, 100),
        (2, 3, 3, 100), (2, 3, 10, 100), (2, 3, 20, 100), (2, 3, 50, 100),
    ]
    for axis, (beta1, beta2, error_scale, data_size) in zip(ax.flat, configs):
        lrg = LinearRegression(beta1, beta2, error_scale, data_size)
        axis.scatter(lrg.x, lrg.y)
        # BUG FIX: the last panel previously drew the regression line of the
        # third model (lrg3) instead of its own; every panel now draws the
        # line of the model it scatters.
        axis.plot(lrg.x, beta1 + beta2 * lrg.x, color='#FA954D', alpha=.7)
        axis.set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
        axis.annotate(r'$\rho={:.4}$'.format(lrg.x_y_cor()),
                      xy=(0.1, 0.9), xycoords='axes fraction')
###############################################################################
###############################################################################
def clt_sample_means(draw, sample_size, n_rep=1000):
    """Return `n_rep` means of samples produced by `draw(sample_size)`.

    `draw` is any callable mapping a sample size to a 1-D array of random
    draws (e.g. ``np.random.rand``).  Extracted as a module-level helper so
    the three columns of ``central_limit_theorem_plot`` share one loop.
    """
    means = np.zeros(n_rep)
    for i in range(n_rep):
        means[i] = np.mean(draw(sample_size))
    return means


def central_limit_theorem_plot():
    """Illustrate the central limit theorem on a 4x3 grid.

    Row 0 shows three population densities (Uniform, Beta, Gamma); rows
    1-3 show histograms of sample means for increasing sample sizes --
    all converging to a normal shape regardless of the population.
    """
    fig, ax = plt.subplots(4, 3, figsize=(20, 20))
    ########################################################################
    # Column 0: Uniform(2, 8) population (red).
    a = 2  # range of uniform distribution
    b = 8
    x = np.linspace(a, b, 100)
    unif_pdf = np.ones(len(x)) * 1/(b-a)
    ax[0, 0].plot(x, unif_pdf, lw=3, color='r')
    # Vertical edges of the rectangular density.
    ax[0, 0].plot([x[0], x[0]], [0, 1/(b-a)], lw=3, color='r', alpha=.9)
    ax[0, 0].plot([x[-1], x[-1]], [0, 1/(b-a)], lw=3, color='r', alpha=.9)
    ax[0, 0].fill_between(x, 1/(b-a), 0, alpha=.5, color='r')
    ax[0, 0].set_xlim([1, 9])
    ax[0, 0].set_ylim([0, .4])
    ax[0, 0].set_title('Uniform Distribution', size=18)
    ax[0, 0].set_ylabel('Population Distribution', size=12)
    for row, (ss, bins) in enumerate([(2, 20), (10, 30), (1000, 40)], 1):
        ax[row, 0].hist(clt_sample_means(np.random.rand, ss), bins=bins,
                        color='r', alpha=.5)
        ax[row, 0].set_ylabel('Sample Distribution, $n = {}$'.format(ss),
                              size=12)
    ########################################################################
    # Column 1: Beta(6, 2) population (green).  First row of sample means
    # keeps matplotlib's default bin count, as in the original figure.
    a = 6
    b = 2
    x = np.linspace(0, 1, 100)
    beta_pdf = sp.stats.beta.pdf(x, a, b)
    ax[0, 1].plot(x, beta_pdf, lw=3, color='g')
    ax[0, 1].set_ylim([0, 6])
    ax[0, 1].fill_between(x, beta_pdf, 0, alpha=.5, color='g')
    ax[0, 1].set_title('Beta Distribution', size=18)
    for row, (ss, bins) in enumerate([(2, None), (10, 20), (100000, 30)], 1):
        means = clt_sample_means(lambda size: sp.stats.beta.rvs(a, b, size=size), ss)
        if bins is None:
            ax[row, 1].hist(means, color='g', alpha=.5)
        else:
            ax[row, 1].hist(means, bins=bins, color='g', alpha=.5)
    ########################################################################
    # Column 2: Gamma(6) population (blue).
    a = 6
    x = np.linspace(0, 25, 100)
    gamma_pdf = sp.stats.gamma.pdf(x, a)
    ax[0, 2].plot(x, gamma_pdf, lw=3, color='b')
    ax[0, 2].set_ylim([0, 0.34])
    ax[0, 2].fill_between(x, gamma_pdf, 0, alpha=.5, color='b')
    ax[0, 2].set_title('Gamma Distribution', size=18)
    for row, (ss, bins) in enumerate([(2, None), (10, 20), (1000, 30)], 1):
        means = clt_sample_means(lambda size: sp.stats.gamma.rvs(a, size=size), ss)
        if bins is None:
            ax[row, 2].hist(means, color='b', alpha=.5)
        else:
            ax[row, 2].hist(means, bins=bins, color='b', alpha=.5)
    ########################################################################
    plt.show()


##########################################################################
##########################################################################
def type12_error():
    """Shade the Type I and Type II error regions of a hypothesis test.

    Draws the null N(0, 1) and alternative N(3, 1) densities; the overlap
    beyond/below the implicit cutoff is filled and labeled.
    """
    x = np.linspace(-6, 9, 200)
    null_loc, alter_loc = 0, 3
    y_null = sp.stats.norm.pdf(x, loc=null_loc)
    y_alter = sp.stats.norm.pdf(x, loc=alter_loc)
    fig, ax = plt.subplots(figsize=(18, 6))
    ax.plot(x, y_null, x, y_alter)
    ax.annotate('Null', (null_loc - .2, max(y_null)/2), size=15)
    ax.annotate('Alternative', (alter_loc - .6, max(y_alter)/2), size=15)
    ax.annotate('Type I Error', (2, max(y_alter)/30), size=15)
    ax.annotate('Type II Error', (0, max(y_alter)/30), size=15)
    # Right tail of the null (Type I) and left tail of the alternative
    # (Type II); slice bounds pick the regions around the cutoff.
    ax.fill_between(x[-98:], y_null[-98:])
    ax.fill_between(x[:103], y_alter[:103])
    ax.set_ylim([0, .5])
    plt.show()


##########################################################################
# NOTE(review): a commented-out draft of `reject_region` (superseded by
# `two_tail_rej_region_demo` below in this file) and a commented-out
# `draw_something` stub were removed here as dead code.
##########################################################################
###############################################################################
###############################################################################
def gen_3samples(loc1, loc2, loc3, scale1, scale2, scale3,
                 size1, size2, size3, n_rep=1000):
    """Run `n_rep` one-way ANOVAs on three simulated normal samples.

    Returns (F_statistic, p_value) lists, one entry per replication.
    Extracted to module level from ``anova_plot`` so it can be reused and
    tested; ``n_rep`` defaults to the original 1000 replications.
    """
    F_statistic, p_value = [], []
    for _ in range(n_rep):
        a = sp.stats.norm.rvs(loc1, scale1, size1)
        # BUG FIX: the second sample was previously drawn with size3
        # instead of size2, silently ignoring the requested n2.
        b = sp.stats.norm.rvs(loc2, scale2, size2)
        c = sp.stats.norm.rvs(loc3, scale3, size3)
        F, p = sp.stats.f_oneway(a, b, c)
        F_statistic.append(F)
        p_value.append(p)
    return F_statistic, p_value


def anova_plot():
    """Monte-Carlo study of one-way ANOVA on a 6x3 grid of panels.

    Nine simulations; for each, the F-statistic histogram (with the 5%
    critical value marked) sits above the corresponding p-value histogram.
    """
    fig, ax = plt.subplots(nrows=6, ncols=3, figsize=(17, 34))
    # (mu1, mu2, mu3, sig1, sig2, sig3, n1, n2, n3) for Simulations 1..9.
    # A dead, immediately-overwritten `params = [3, 3.1, 2.9, .01, ...]`
    # assignment in Simulation 9 of the original was dropped.
    simulations = [
        (3, 6, 9, 6, 6, 6, 10, 20, 30),
        (3, 3.1, 2.9, 6, 6, 6, 10, 20, 30),
        (3, 3.1, 2.9, 6, 12, 18, 10, 20, 30),
        (3, 6, 9, 10, 10, 10, 10, 20, 30),
        (3, 5, 6, 10, 10, 10, 10, 10, 10),
        (3, 5, 6, 10, 10, 10, 5000, 5000, 5000),
        (3, 3, 3, 100, 100, 100, 10, 10, 10),
        (3, 3, 3, 1, 1, 2, 10, 20, 30),
        (3, 3, 3, 1, 1, 2, 10, 20, 30),
    ]
    for k, params in enumerate(simulations):
        mu1, mu2, mu3, sig1, sig2, sig3, size1, size2, size3 = params
        # F histograms occupy even rows; p-value histograms the row below.
        row, col = 2 * (k // 3), k % 3
        F_statistic, p_value = gen_3samples(*params)
        n, bins, patches = ax[row, col].hist(F_statistic, bins=50)
        # 5% critical value of F(2, N - 3).
        F_critical = sp.stats.f.ppf(.95, 2, size1 + size2 + size3 - 3)
        textstr = '\n'.join((
            '$\\mu_1, \\mu_2, \\mu_3 = {}, {}, {}$'.format(mu1, mu2, mu3),
            '$\\sigma_1, \\sigma_2, \\sigma_3 = {}, {}, {}$'.format(
                sig1, sig2, sig3),
            '$n_1, n_2, n_3 = {}, {}, {}$'.format(size1, size2, size3),
            r'$F_c = {:.4f}$'.format(F_critical)))
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        ax[row, col].text(max(bins)/2, max(n)/2, textstr, fontsize=10,
                          verticalalignment='top', bbox=props)
        ax[row, col].vlines(F_critical, 0, max(n)*1.1, color='r')
        ax[row + 1, col].hist(p_value, bins=50)
        ax[row, col].set_title('Simulation {}'.format(k + 1))
    #######################||Rectangle||##########################
    # Black frames grouping each (F, p) pair of panels, drawn in figure
    # coordinates; all frames share the same width and height.
    for x0, y0 in [(0.10, 0.633), (0.3755, 0.633), (0.650, 0.633),
                   (0.10, 0.37), (0.3755, 0.37), (0.650, 0.37),
                   (0.650, 0.108), (0.3755, 0.108), (0.1, 0.108)]:
        rect = plt.Rectangle(
            (x0, y0), 0.2645, 0.258, fill=False, color="k", lw=2,
            zorder=1000, transform=fig.transFigure, figure=fig)
        fig.patches.append(rect)
    ####################################################################
    plt.show()


###############################################################################
###############################################################################
def two_tail_rej_region_demo():
    """Visualize the two-tailed t-test rejection region for male heights.

    Reads a CSV of person heights, tests H0: mean height = 172 and shades
    the rejection tails of the t distribution centered at the null.
    NOTE(review): this function is truncated in this chunk -- the upper
    rejection tail and the second (standardized) axis are outside this
    view; a trailing `numpy.linspace` extraction residue was dropped.
    """
    data = pd.read_csv('500_Person_Gender_Height_Weight_Index.csv')
    df = len(data[data['Gender'] == 'Male']) - 1
    t_975 = sp.stats.t.ppf(.975, df=df)
    t_025 = sp.stats.t.ppf(.025, df=df)  # presumably used past the truncation
    male_mean = data[data['Gender'] == 'Male']['Height'].mean()
    male_std = data[data['Gender'] == 'Male']['Height'].std(ddof=1)
    male_std_error = male_std / np.sqrt(len(data[data['Gender'] == 'Male']))
    male_null = 172  # hypothesized population mean height (cm)
    x = np.linspace(male_null - 5, male_null + 5, 200)
    y_t = sp.stats.t.pdf(x, df=df, loc=male_null)
    fig, ax = plt.subplots(2, 1, figsize=(18, 8))
    ax[0].plot(x, y_t, color='tomato', lw=3)
    rejection_lower = male_null - t_975 * male_std_error
    x_rej_lower = np.linspace(rejection_lower - 3, rejection_lower, 30)
    y_rej_lower = sp.stats.t.pdf(x_rej_lower, df=df, loc=male_null)
    ax[0].fill_between(x_rej_lower, y_rej_lower, color='tomato', alpha=.7)
    rejection_upper = male_null + t_975 * male_std_error
    x_rej_upper = np.linspace(rejection_upper, rejection_upper + 3, 30)
    # NOTE(review): remainder of this function lies outside this chunk.
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import numpy as np from nolitsa import lyapunov from numpy.testing import assert_allclose, run_module_suite def test_mle(): # Test lyapunov.mle() # Particle moving uniformly in 7d: y(t) = a + b*t a = np.random.random(7) b = np.random.random(7) n = 250 window = 15 t = np.arange(n) y = a + b * t[:, np.newaxis] for metric in ('chebyshev', 'cityblock', 'euclidean'): if metric == 'chebyshev': modb = np.max(
np.abs(b)
numpy.abs
import pytest from numba import carray, njit from pytest import approx from numerous.engine.model.lowering.llvm_builder import LLVMBuilder import numpy as np import os initial_values = np.arange(1, 10) filename = 'llvm_IR_code.txt' @pytest.fixture(autouse=True) def run_around_tests(): yield if os.path.exists(filename): os.remove(filename) eval_llvm_signature = 'void(float64, float64, CPointer(float64), CPointer(float64))' def eval_llvm(s_x1, s_x2, s_x2_dot, s_x3_dot): carray(s_x2_dot, (1,))[0] = -100 if s_x1 > s_x2 else 50 carray(s_x3_dot, (1,))[0] = -carray(s_x2_dot, (1,))[0] eval_llvm_mix_signature = 'void(float64, CPointer(float64),float64, CPointer(float64))' def eval_llvm_mix(s_x1, s_x2_dot, s_x2, s_x3_dot): carray(s_x2_dot, (1,))[0] = -100 if s_x1 > s_x2 else 50 carray(s_x3_dot, (1,))[0] = -carray(s_x2_dot, (1,))[0] eval_llvm2_signature = 'void(float64, float64, CPointer(float64), CPointer(float64))' def eval_llvm2(s_x1, s_x2, s_x2_dot, s_x3_dot): carray(s_x2_dot, (1,))[0] = nested(-100) if s_x1 > s_x2 else 50 carray(s_x3_dot, (1,))[0] = -carray(s_x2_dot, (1,))[0] @njit def nested(s_x): return s_x + 1 number_of_derivatives = 3 number_of_states = 3 variable_names = { "oscillator1.mechanics.x": 0, "oscillator1.mechanics.y": 1, "oscillator1.mechanics.z": 2, "oscillator1.mechanics.a": 3, "oscillator1.mechanics.b": 4, "oscillator1.mechanics.c": 5, "oscillator1.mechanics.x_dot": 6, "oscillator1.mechanics.y_dot": 7, "oscillator1.mechanics.z_dot": 8, } variable_distributed = { "oscillator1.mechanics.a": 0, "oscillator1.mechanics.x_dot": 1, "oscillator1.mechanics.y_dot": 2, "oscillator1.mechanics.z_dot": 3, "oscillator1.mechanics.x": 4, "oscillator1.mechanics.b": 5, "oscillator1.mechanics.c": 6, "oscillator1.mechanics.y": 7, "oscillator1.mechanics.z": 8, } DERIVATIVES = ["oscillator1.mechanics.x_dot", "oscillator1.mechanics.y_dot", "oscillator1.mechanics.z_dot"] STATES = ["oscillator1.mechanics.x", "oscillator1.mechanics.y", "oscillator1.mechanics.z"] def 
test_llvm_1_to_1_mapping_state(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_program.add_mapping(["oscillator1.mechanics.x"], ["oscillator1.mechanics.x_dot"]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(np.array([2.1, 2.2, 2.3]))) == np.array([2.1, 8, 9.]) def test_llvm_1_to_1_mapping_parameter(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_program.add_mapping(["oscillator1.mechanics.b"], ["oscillator1.mechanics.x_dot"]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(np.array([2.1, 2.2, 2.3]))) == np.array([5, 8, 9.]) def test_llvm_n_to_1_sum_mapping(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_program.add_mapping(["oscillator1.mechanics.x", "oscillator1.mechanics.y", "oscillator1.mechanics.b"], ["oscillator1.mechanics.x_dot"]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(np.array([2.1, 2.2, 2.3]))) == np.array([9.3, 8, 9.]) def test_llvm_1_to_n_mapping(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_program.add_mapping(["oscillator1.mechanics.x"], ["oscillator1.mechanics.x_dot", "oscillator1.mechanics.y_dot"]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(np.array([2.1, 2.2, 2.3]))) == np.array([2.1, 2.1, 9.]) def test_llvm_1_function(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_names = llvm_program.add_external_function(eval_llvm, eval_llvm_signature, number_of_args=4, target_ids=[2, 3]) llvm_program.add_call(llvm_names[eval_llvm.__qualname__], ["oscillator1.mechanics.x", "oscillator1.mechanics.y", "oscillator1.mechanics.x_dot", "oscillator1.mechanics.y_dot"], target_ids=[2, 3]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(np.array([2.1, 2.2, 2.3]))) == np.array([50, -50, 9.]) assert approx(diff(np.array([2.3, 2.2, 2.1]))) == 
np.array([-100, 100, 9.]) def test_llvm_nested_function_and_mapping(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_names = llvm_program.add_external_function(eval_llvm2, eval_llvm2_signature, number_of_args=4, target_ids=[2, 3]) llvm_program.add_call(llvm_names[eval_llvm2.__qualname__], ["oscillator1.mechanics.x", "oscillator1.mechanics.y", "oscillator1.mechanics.a", "oscillator1.mechanics.y_dot"], target_ids=[2, 3]) llvm_program.add_mapping(args=["oscillator1.mechanics.a"], targets=["oscillator1.mechanics.x_dot"]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(np.array([2.1, 2.2, 2.3]))) == np.array([50, -50, 9.]) assert approx(diff(np.array([2.3, 2.2, 2.1]))) == np.array([-99, 99, 9.]) def test_llvm_1_function_and_mapping(): llvm_program = LLVMBuilder(initial_values, variable_names, STATES, DERIVATIVES) llvm_names = llvm_program.add_external_function(eval_llvm, eval_llvm_signature, number_of_args=4, target_ids=[2, 3]) llvm_program.add_call(llvm_names[eval_llvm.__qualname__], ["oscillator1.mechanics.x", "oscillator1.mechanics.y", "oscillator1.mechanics.a", "oscillator1.mechanics.y_dot"], target_ids=[2, 3]) llvm_program.add_mapping(args=["oscillator1.mechanics.a"], targets=["oscillator1.mechanics.x_dot"]) diff, var_func, _ = llvm_program.generate(filename) assert approx(diff(
np.array([2.1, 2.2, 2.3])
numpy.array
""" test_gaussian.py: Test suite for the Gaussian estimator class :class:`GaussEst` """ from __future__ import print_function import unittest import numpy as np # Add the path to the vampyre package and import it import env env.add_vp_path() import vampyre as vp def lin_test(zshape=(500,10),Ashape=(1000,500),verbose=False,tol=0.1): """ Unit test for the linear estimator class The test is performed by generating random data :math:`y=Az+w, z \\sim {\\mathcal N}(r, \\tau_r I), w \\sim {\\mathcal N}(0, \\tau_w I)` Then the method estimates :math:`z` from :math:`y` and compares the expected and measured errors. :param zshape: shape of :math:`z` :param Ashape: shape of :A:`z`. This must be consistent with :code:`zshape`. :param Boolenan verbose: print results :param tol: error tolerance above which test is considered to fail. """ # Generate random parameters rvar = 10**(np.random.uniform(-1,1,1))[0] wvar = 10**(np.random.uniform(-1,1,1))[0] # Generate random matrix A = np.random.normal(0,1,Ashape)/np.sqrt(Ashape[1]) Aop = vp.trans.MatrixLT(A, zshape) yshape = Aop.shape1 # Add noise on input and output r = np.random.normal(0,1,zshape) z = r + np.random.normal(0,np.sqrt(rvar),zshape) y = A.dot(z) + np.random.normal(0,
np.sqrt(wvar)
numpy.sqrt
from copy import copy from itertools import cycle, islice import numpy as np import pandas as pd import pytest from napari._tests.utils import check_layer_world_data_extent from napari.layers import Shapes from napari.layers.utils._text_constants import TextMode from napari.utils.colormaps.standardize_color import transform_color def _make_cycled_properties(values, length): """Helper function to make property values Parameters ---------- values The values to be cycled. length : int The length of the resulting property array Returns ------- cycled_properties : np.ndarray The property array comprising the cycled values. """ cycled_properties = np.array(list(islice(cycle(values), 0, length))) return cycled_properties def test_empty_shapes(): shp = Shapes() assert shp.ndim == 2 properties_array = {'shape_type': _make_cycled_properties(['A', 'B'], 10)} properties_list = {'shape_type': list(_make_cycled_properties(['A', 'B'], 10))} @pytest.mark.parametrize("properties", [properties_array, properties_list]) def test_properties(properties): shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, properties=copy(properties)) np.testing.assert_equal(layer.properties, properties) current_prop = {'shape_type': np.array(['B'])} assert layer.current_properties == current_prop # test removing shapes layer.selected_data = {0, 1} layer.remove_selected() remove_properties = properties['shape_type'][2::] assert len(layer.properties['shape_type']) == (shape[0] - 2) assert np.all(layer.properties['shape_type'] == remove_properties) # test selection of properties layer.selected_data = {0} selected_annotation = layer.current_properties['shape_type'] assert len(selected_annotation) == 1 assert selected_annotation[0] == 'A' # test adding shapes with properties new_data = np.random.random((1, 4, 2)) new_shape_type = ['rectangle'] layer.add(new_data, shape_type=new_shape_type) add_properties = np.concatenate((remove_properties, ['A']), axis=0) assert 
np.all(layer.properties['shape_type'] == add_properties) # test copy/paste layer.selected_data = {0, 1} layer._copy_data() assert np.all(layer._clipboard['properties']['shape_type'] == ['A', 'B']) layer._paste_data() paste_properties = np.concatenate((add_properties, ['A', 'B']), axis=0) assert np.all(layer.properties['shape_type'] == paste_properties) # test updating a property layer.mode = 'select' layer.selected_data = {0} new_property = {'shape_type': np.array(['B'])} layer.current_properties = new_property updated_properties = layer.properties assert updated_properties['shape_type'][0] == 'B' @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_adding_properties(attribute): """Test adding properties to an existing layer""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) # add properties properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])} layer.properties = properties np.testing.assert_equal(layer.properties, properties) # add properties as a dataframe properties_df = pd.DataFrame(properties) layer.properties = properties_df np.testing.assert_equal(layer.properties, properties) # add properties as a dictionary with list values properties_list = { 'shape_type': list(_make_cycled_properties(['A', 'B'], shape[0])) } layer.properties = properties_list assert isinstance(layer.properties['shape_type'], np.ndarray) # removing a property that was the _*_color_property should give a warning setattr(layer, f'_{attribute}_color_property', 'shape_type') properties_2 = { 'not_shape_type': _make_cycled_properties(['A', 'B'], shape[0]) } with pytest.warns(RuntimeWarning): layer.properties = properties_2 def test_data_setter_with_properties(): """Test layer data on a layer with properties via the data setter""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])} layer = Shapes(data, 
properties=properties) # test setting to data with fewer shapes n_new_shapes = 4 new_data = 20 * np.random.random((n_new_shapes, 4, 2)) layer.data = new_data assert len(layer.properties['shape_type']) == n_new_shapes # test setting to data with more shapes n_new_shapes_2 = 6 new_data_2 = 20 * np.random.random((n_new_shapes_2, 4, 2)) layer.data = new_data_2 assert len(layer.properties['shape_type']) == n_new_shapes_2 # test setting to data with same shapes new_data_3 = 20 * np.random.random((n_new_shapes_2, 4, 2)) layer.data = new_data_3 assert len(layer.properties['shape_type']) == n_new_shapes_2 def test_properties_dataframe(): """Test if properties can be provided as a DataFrame""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])} properties_df = pd.DataFrame(properties) properties_df = properties_df.astype(properties['shape_type'].dtype) layer = Shapes(data, properties=properties_df) np.testing.assert_equal(layer.properties, properties) def test_empty_layer_with_text_properties(): """Test initializing an empty layer with text defined""" default_properties = {'shape_type': np.array([1.5], dtype=float)} text_kwargs = {'text': 'shape_type', 'color': 'red'} layer = Shapes( properties=default_properties, text=text_kwargs, ) assert layer.text._mode == TextMode.PROPERTY assert layer.text.values.size == 0 np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1]) # add a shape and check that the appropriate text value was added layer.add(np.random.random((1, 4, 2))) np.testing.assert_equal(layer.text.values, ['1.5']) np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1]) def test_empty_layer_with_text_formatted(): """Test initializing an empty layer with text defined""" default_properties = {'shape_type': np.array([1.5], dtype=float)} layer = Shapes( properties=default_properties, text='shape_type: {shape_type:.2f}', ) assert layer.text._mode == TextMode.FORMATTED 
assert layer.text.values.size == 0 # add a shape and check that the appropriate text value was added layer.add(np.random.random((1, 4, 2))) np.testing.assert_equal(layer.text.values, ['shape_type: 1.50']) @pytest.mark.parametrize("properties", [properties_array, properties_list]) def test_text_from_property_value(properties): """Test setting text from a property value""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, properties=copy(properties), text='shape_type') np.testing.assert_equal(layer.text.values, properties['shape_type']) @pytest.mark.parametrize("properties", [properties_array, properties_list]) def test_text_from_property_fstring(properties): """Test setting text with an f-string from the property value""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes( data, properties=copy(properties), text='type: {shape_type}' ) expected_text = ['type: ' + v for v in properties['shape_type']] np.testing.assert_equal(layer.text.values, expected_text) # test updating the text layer.text = 'type-ish: {shape_type}' expected_text_2 = ['type-ish: ' + v for v in properties['shape_type']] np.testing.assert_equal(layer.text.values, expected_text_2) # copy/paste layer.selected_data = {0} layer._copy_data() layer._paste_data() expected_text_3 = expected_text_2 + ['type-ish: A'] np.testing.assert_equal(layer.text.values, expected_text_3) # add shape layer.selected_data = {0} new_shape = np.random.random((1, 4, 2)) layer.add(new_shape) expected_text_4 = expected_text_3 + ['type-ish: A'] np.testing.assert_equal(layer.text.values, expected_text_4) @pytest.mark.parametrize("properties", [properties_array, properties_list]) def test_set_text_with_kwarg_dict(properties): text_kwargs = { 'text': 'type: {shape_type}', 'color': [0, 0, 0, 1], 'rotation': 10, 'translation': [5, 5], 'anchor': 'upper_left', 'size': 10, 'visible': True, } shape = (10, 4, 2) np.random.seed(0) data = 20 * 
np.random.random(shape) layer = Shapes(data, properties=copy(properties), text=text_kwargs) expected_text = ['type: ' + v for v in properties['shape_type']] np.testing.assert_equal(layer.text.values, expected_text) for property, value in text_kwargs.items(): if property == 'text': continue layer_value = getattr(layer._text, property) np.testing.assert_equal(layer_value, value) @pytest.mark.parametrize("properties", [properties_array, properties_list]) def test_text_error(properties): """creating a layer with text as the wrong type should raise an error""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) # try adding text as the wrong type with pytest.raises(TypeError): Shapes(data, properties=copy(properties), text=123) def test_refresh_text(): """Test refreshing the text after setting new properties""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': ['A'] * shape[0]} layer = Shapes(data, properties=copy(properties), text='shape_type') new_properties = {'shape_type': ['B'] * shape[0]} layer.properties = new_properties np.testing.assert_equal(layer.text.values, new_properties['shape_type']) def test_nd_text(): """Test slicing of text coords with nD shapes""" shapes_data = [ [[0, 10, 10, 10], [0, 10, 20, 20], [0, 10, 10, 20], [0, 10, 20, 10]], [[1, 20, 30, 30], [1, 20, 50, 50], [1, 20, 50, 30], [1, 20, 30, 50]], ] properties = {'shape_type': ['A', 'B']} text_kwargs = {'text': 'shape_type', 'anchor': 'center'} layer = Shapes(shapes_data, properties=properties, text=text_kwargs) assert layer.ndim == 4 layer._slice_dims(point=[0, 10, 0, 0], ndisplay=2) np.testing.assert_equal(layer._indices_view, [0]) np.testing.assert_equal(layer._view_text_coords[0], [[15, 15]]) layer._slice_dims(point=[1, 0, 0, 0], ndisplay=3) np.testing.assert_equal(layer._indices_view, [1]) np.testing.assert_equal(layer._view_text_coords[0], [[20, 40, 40]]) @pytest.mark.parametrize("properties", [properties_array, 
properties_list]) def test_data_setter_with_text(properties): """Test layer data on a layer with text via the data setter""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, properties=copy(properties), text='shape_type') # test setting to data with fewer shapes n_new_shapes = 4 new_data = 20 * np.random.random((n_new_shapes, 4, 2)) layer.data = new_data assert len(layer.text.values) == n_new_shapes # test setting to data with more shapes n_new_shapes_2 = 6 new_data_2 = 20 * np.random.random((n_new_shapes_2, 4, 2)) layer.data = new_data_2 assert len(layer.text.values) == n_new_shapes_2 # test setting to data with same shapes new_data_3 = 20 * np.random.random((n_new_shapes_2, 4, 2)) layer.data = new_data_3 assert len(layer.text.values) == n_new_shapes_2 def test_rectangles(): """Test instantiating Shapes layer with a random 2D rectangles.""" # Test a single four corner rectangle shape = (1, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test multiple four corner rectangles shape = (10, 4, 2) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test a single two corner rectangle, which gets converted into four # corner rectangle shape = (1, 2, 2) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == 1 assert len(layer.data[0]) == 4 assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test multiple two corner rectangles shape = (10, 2, 2) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([len(ld) == 4 for ld in 
layer.data]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) def test_rectangles_with_shape_type(): """Test instantiating rectangles with shape_type in data""" # Test (rectangle, shape_type) tuple shape = (1, 4, 2) np.random.seed(0) vertices = 20 * np.random.random(shape) data = (vertices, "rectangle") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test (list of rectangles, shape_type) tuple shape = (10, 4, 2) vertices = 20 * np.random.random(shape) data = (vertices, "rectangle") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test list of (rectangle, shape_type) tuples data = [(vertices[i], "rectangle") for i in range(shape[0])] layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) def test_rectangles_roundtrip(): """Test a full roundtrip with rectangles data.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) new_layer = Shapes(layer.data) assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)]) def test_integer_rectangle(): """Test instantiating rectangles with integer data.""" shape = (10, 2, 2) np.random.seed(1) data = np.random.randint(20, size=shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([len(ld) == 4 for ld in layer.data]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) def test_negative_rectangle(): """Test instantiating rectangles with negative data.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * 
np.random.random(shape) - 10 layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) def test_empty_rectangle(): """Test instantiating rectangles with empty data.""" shape = (0, 0, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) def test_3D_rectangles(): """Test instantiating Shapes layer with 3D planar rectangles.""" # Test a single four corner rectangle np.random.seed(0) planes = np.tile(np.arange(10).reshape((10, 1, 1)), (1, 4, 1)) corners = np.random.uniform(0, 10, size=(10, 4, 2)) data = np.concatenate((planes, corners), axis=2) layer = Shapes(data) assert layer.nshapes == len(data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == 3 assert np.all([s == 'rectangle' for s in layer.shape_type]) def test_ellipses(): """Test instantiating Shapes layer with a random 2D ellipses.""" # Test a single four corner ellipses shape = (1, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='ellipse') assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test multiple four corner ellipses shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='ellipse') assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test a single ellipse center radii, which gets converted into four # corner ellipse shape = (1, 2, 2) np.random.seed(0) data = 20 * 
np.random.random(shape) layer = Shapes(data, shape_type='ellipse') assert layer.nshapes == 1 assert len(layer.data[0]) == 4 assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test multiple center radii ellipses shape = (10, 2, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='ellipse') assert layer.nshapes == shape[0] assert np.all([len(ld) == 4 for ld in layer.data]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) def test_ellipses_with_shape_type(): """Test instantiating ellipses with shape_type in data""" # Test single four corner (vertices, shape_type) tuple shape = (1, 4, 2) np.random.seed(0) vertices = 20 * np.random.random(shape) data = (vertices, "ellipse") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test multiple four corner (list of vertices, shape_type) tuple shape = (10, 4, 2) np.random.seed(0) vertices = 20 * np.random.random(shape) data = (vertices, "ellipse") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test list of four corner (vertices, shape_type) tuples shape = (10, 4, 2) np.random.seed(0) vertices = 20 * np.random.random(shape) data = [(vertices[i], "ellipse") for i in range(shape[0])] layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test single (center-radii, shape_type) ellipse shape = (1, 2, 2) np.random.seed(0) data = (20 * np.random.random(shape), "ellipse") layer = Shapes(data) assert layer.nshapes == 1 assert len(layer.data[0]) == 4 assert layer.ndim == 
shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test (list of center-radii, shape_type) tuple shape = (10, 2, 2) np.random.seed(0) center_radii = 20 * np.random.random(shape) data = (center_radii, "ellipse") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([len(ld) == 4 for ld in layer.data]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # Test list of (center-radii, shape_type) tuples shape = (10, 2, 2) np.random.seed(0) center_radii = 20 * np.random.random(shape) data = [(center_radii[i], "ellipse") for i in range(shape[0])] layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([len(ld) == 4 for ld in layer.data]) assert layer.ndim == shape[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) def test_4D_ellispse(): """Test instantiating Shapes layer with 4D planar ellipse.""" # Test a single 4D ellipse np.random.seed(0) data = [ [ [3, 5, 108, 108], [3, 5, 108, 148], [3, 5, 148, 148], [3, 5, 148, 108], ] ] layer = Shapes(data, shape_type='ellipse') assert layer.nshapes == len(data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == 4 assert np.all([s == 'ellipse' for s in layer.shape_type]) def test_ellipses_roundtrip(): """Test a full roundtrip with ellipss data.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='ellipse') new_layer = Shapes(layer.data, shape_type='ellipse') assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)]) def test_lines(): """Test instantiating Shapes layer with a random 2D lines.""" # Test a single two end point line shape = (1, 2, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='line') assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'line' for s in layer.shape_type]) # Test multiple lines shape = (10, 2, 2) 
np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='line') assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == shape[2] assert np.all([s == 'line' for s in layer.shape_type]) def test_lines_with_shape_type(): """Test instantiating lines with shape_type""" # Test (single line, shape_type) tuple shape = (1, 2, 2) np.random.seed(0) end_points = 20 * np.random.random(shape) data = (end_points, 'line') layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == end_points[0]) assert layer.ndim == shape[2] assert np.all([s == 'line' for s in layer.shape_type]) # Test (multiple lines, shape_type) tuple shape = (10, 2, 2) np.random.seed(0) end_points = 20 * np.random.random(shape) data = (end_points, "line") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, end_points)]) assert layer.ndim == shape[2] assert np.all([s == 'line' for s in layer.shape_type]) # Test list of (line, shape_type) tuples shape = (10, 2, 2) np.random.seed(0) end_points = 20 * np.random.random(shape) data = [(end_points[i], "line") for i in range(shape[0])] layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, end_points)]) assert layer.ndim == shape[2] assert np.all([s == 'line' for s in layer.shape_type]) def test_lines_roundtrip(): """Test a full roundtrip with line data.""" shape = (10, 2, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='line') new_layer = Shapes(layer.data, shape_type='line') assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)]) def test_paths(): """Test instantiating Shapes layer with a random 2D paths.""" # Test a single path with 6 points shape = (1, 6, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='path') assert layer.nshapes == 
shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'path' for s in layer.shape_type]) # Test multiple paths with different numbers of points data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10) ] layer = Shapes(data, shape_type='path') assert layer.nshapes == len(data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == 2 assert np.all([s == 'path' for s in layer.shape_type]) def test_paths_with_shape_type(): """Test instantiating paths with shape_type in data""" # Test (single path, shape_type) tuple shape = (1, 6, 2) np.random.seed(0) path_points = 20 * np.random.random(shape) data = (path_points, "path") layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == path_points[0]) assert layer.ndim == shape[2] assert np.all([s == 'path' for s in layer.shape_type]) # Test (list of paths, shape_type) tuple path_points = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10) ] data = (path_points, "path") layer = Shapes(data) assert layer.nshapes == len(path_points) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, path_points)]) assert layer.ndim == 2 assert np.all([s == 'path' for s in layer.shape_type]) # Test list of (path, shape_type) tuples data = [(path_points[i], "path") for i in range(len(path_points))] layer = Shapes(data) assert layer.nshapes == len(data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, path_points)]) assert layer.ndim == 2 assert np.all([s == 'path' for s in layer.shape_type]) def test_paths_roundtrip(): """Test a full roundtrip with path data.""" np.random.seed(0) data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10) ] layer = Shapes(data, shape_type='path') new_layer = Shapes(layer.data, shape_type='path') assert np.all( [np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)] ) def test_polygons(): """Test instantiating Shapes 
layer with a random 2D polygons.""" # Test a single polygon with 6 points shape = (1, 6, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data, shape_type='polygon') assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'polygon' for s in layer.shape_type]) # Test multiple polygons with different numbers of points data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10) ] layer = Shapes(data, shape_type='polygon') assert layer.nshapes == len(data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == 2 assert np.all([s == 'polygon' for s in layer.shape_type]) def test_polygons_with_shape_type(): """Test 2D polygons with shape_type in data""" # Test single (polygon, shape_type) tuple shape = (1, 6, 2) np.random.seed(0) vertices = 20 * np.random.random(shape) data = (vertices, 'polygon') layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == vertices[0]) assert layer.ndim == shape[2] assert np.all([s == 'polygon' for s in layer.shape_type]) # Test (list of polygons, shape_type) tuple polygons = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10) ] data = (polygons, 'polygon') layer = Shapes(data) assert layer.nshapes == len(polygons) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, polygons)]) assert layer.ndim == 2 assert np.all([s == 'polygon' for s in layer.shape_type]) # Test list of (polygon, shape_type) tuples data = [(polygons[i], 'polygon') for i in range(len(polygons))] layer = Shapes(data) assert layer.nshapes == len(polygons) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, polygons)]) assert layer.ndim == 2 assert np.all([s == 'polygon' for s in layer.shape_type]) def test_polygon_roundtrip(): """Test a full roundtrip with polygon data.""" np.random.seed(0) data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in 
range(10) ] layer = Shapes(data, shape_type='polygon') new_layer = Shapes(layer.data, shape_type='polygon') assert np.all( [np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)] ) def test_mixed_shapes(): """Test instantiating Shapes layer with a mix of random 2D shapes.""" # Test multiple polygons with different numbers of points np.random.seed(0) shape_vertices = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5) ] + list(np.random.random((5, 4, 2))) shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2 layer = Shapes(shape_vertices, shape_type=shape_type) assert layer.nshapes == len(shape_vertices) assert np.all( [np.all(ld == d) for ld, d in zip(layer.data, shape_vertices)] ) assert layer.ndim == 2 assert np.all([s == so for s, so in zip(layer.shape_type, shape_type)]) # Test roundtrip with mixed data new_layer = Shapes(layer.data, shape_type=layer.shape_type) assert np.all( [np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)] ) assert np.all( [ns == s for ns, s in zip(new_layer.shape_type, layer.shape_type)] ) def test_mixed_shapes_with_shape_type(): """Test adding mixed shapes with shape_type in data""" np.random.seed(0) shape_vertices = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5) ] + list(np.random.random((5, 4, 2))) shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2 # Test multiple (shape, shape_type) tuples data = list(zip(shape_vertices, shape_type)) layer = Shapes(data) assert layer.nshapes == len(shape_vertices) assert np.all( [np.all(ld == d) for ld, d in zip(layer.data, shape_vertices)] ) assert layer.ndim == 2 assert np.all([s == so for s, so in zip(layer.shape_type, shape_type)]) def test_data_shape_type_overwrites_meta(): """Test shape type passed through data property overwrites metadata shape type""" shape = (10, 4, 2) np.random.seed(0) vertices = 20 * np.random.random(shape) data = (vertices, "ellipse") layer = Shapes(data, shape_type='rectangle') 
assert np.all([s == 'ellipse' for s in layer.shape_type]) data = [(vertices[i], "ellipse") for i in range(shape[0])] layer = Shapes(data, shape_type='rectangle') assert np.all([s == 'ellipse' for s in layer.shape_type]) def test_changing_shapes(): """Test changing Shapes data.""" shape_a = (10, 4, 2) shape_b = (20, 4, 2) np.random.seed(0) vertices_a = 20 * np.random.random(shape_a) vertices_b = 20 * np.random.random(shape_b) layer = Shapes(vertices_a) assert layer.nshapes == shape_a[0] layer.data = vertices_b assert layer.nshapes == shape_b[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices_b)]) assert layer.ndim == shape_b[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # setting data with shape type data_a = (vertices_a, "ellipse") layer.data = data_a assert layer.nshapes == shape_a[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices_a)]) assert layer.ndim == shape_a[2] assert np.all([s == 'ellipse' for s in layer.shape_type]) # setting data with fewer shapes smaller_data = vertices_a[:5] current_edge_color = layer._data_view.edge_color current_edge_width = layer._data_view.edge_widths current_face_color = layer._data_view.face_color current_z = layer._data_view.z_indices layer.data = smaller_data assert layer.nshapes == smaller_data.shape[0] assert np.allclose(layer._data_view.edge_color, current_edge_color[:5]) assert np.allclose(layer._data_view.face_color, current_face_color[:5]) assert np.allclose(layer._data_view.edge_widths, current_edge_width[:5]) assert np.allclose(layer._data_view.z_indices, current_z[:5]) # setting data with added shapes current_edge_color = layer._data_view.edge_color current_edge_width = layer._data_view.edge_widths current_face_color = layer._data_view.face_color current_z = layer._data_view.z_indices bigger_data = vertices_b layer.data = bigger_data assert layer.nshapes == bigger_data.shape[0] assert np.allclose(layer._data_view.edge_color[:5], current_edge_color) assert 
np.allclose(layer._data_view.face_color[:5], current_face_color) assert np.allclose(layer._data_view.edge_widths[:5], current_edge_width) assert np.allclose(layer._data_view.z_indices[:5], current_z) def test_changing_shape_type(): """Test changing shape type""" np.random.seed(0) rectangles = 20 * np.random.random((10, 4, 2)) layer = Shapes(rectangles, shape_type='rectangle') layer.shape_type = "ellipse" assert np.all([s == 'ellipse' for s in layer.shape_type]) def test_adding_shapes(): """Test adding shapes.""" # Start with polygons with different numbers of points np.random.seed(0) data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5) ] # shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2 layer = Shapes(data, shape_type='polygon') new_data = np.random.random((5, 4, 2)) new_shape_type = ['rectangle'] * 3 + ['ellipse'] * 2 layer.add(new_data, shape_type=new_shape_type) all_data = data + list(new_data) all_shape_type = ['polygon'] * 5 + new_shape_type assert layer.nshapes == len(all_data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, all_data)]) assert layer.ndim == 2 assert np.all([s == so for s, so in zip(layer.shape_type, all_shape_type)]) # test adding data with shape_type new_vertices = np.random.random((5, 4, 2)) new_shape_type2 = ['ellipse'] * 3 + ['rectangle'] * 2 new_data2 = list(zip(new_vertices, new_shape_type2)) layer.add(new_data2) all_vertices = all_data + list(new_vertices) all_shape_type = all_shape_type + new_shape_type2 assert layer.nshapes == len(all_vertices) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, all_vertices)]) assert layer.ndim == 2 assert np.all([s == so for s, so in zip(layer.shape_type, all_shape_type)]) def test_adding_shapes_to_empty(): """Test adding shapes to empty.""" data = np.empty((0, 0, 2)) np.random.seed(0) layer = Shapes(np.empty((0, 0, 2))) assert len(layer.data) == 0 data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5) ] + 
list(np.random.random((5, 4, 2))) shape_type = ['path'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2 layer.add(data, shape_type=shape_type) assert layer.nshapes == len(data) assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == 2 assert np.all([s == so for s, so in zip(layer.shape_type, shape_type)]) def test_selecting_shapes(): """Test selecting shapes.""" data = 20 * np.random.random((10, 4, 2)) np.random.seed(0) layer = Shapes(data) layer.selected_data = {0, 1} assert layer.selected_data == {0, 1} layer.selected_data = {9} assert layer.selected_data == {9} layer.selected_data = set() assert layer.selected_data == set() def test_removing_all_shapes_empty_list(): """Test removing all shapes with an empty list.""" data = 20 * np.random.random((10, 4, 2)) np.random.seed(0) layer = Shapes(data) assert layer.nshapes == 10 layer.data = [] assert layer.nshapes == 0 def test_removing_all_shapes_empty_array(): """Test removing all shapes with an empty list.""" data = 20 * np.random.random((10, 4, 2)) np.random.seed(0) layer = Shapes(data) assert layer.nshapes == 10 layer.data = np.empty((0, 2)) assert layer.nshapes == 0 def test_removing_selected_shapes(): """Test removing selected shapes.""" np.random.seed(0) data = [ 20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5) ] + list(np.random.random((5, 4, 2))) shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2 layer = Shapes(data, shape_type=shape_type) # With nothing selected no points should be removed layer.remove_selected() assert len(layer.data) == len(data) # Select three shapes and remove them layer.selected_data = {1, 7, 8} layer.remove_selected() keep = [0] + list(range(2, 7)) + [9] data_keep = [data[i] for i in keep] shape_type_keep = [shape_type[i] for i in keep] assert len(layer.data) == len(data_keep) assert len(layer.selected_data) == 0 assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data_keep)]) assert layer.ndim == 2 assert np.all( 
[s == so for s, so in zip(layer.shape_type, shape_type_keep)] ) def test_changing_modes(): """Test changing modes.""" np.random.seed(0) data = 20 * np.random.random((10, 4, 2)) layer = Shapes(data) assert layer.mode == 'pan_zoom' assert layer.interactive is True layer.mode = 'select' assert layer.mode == 'select' assert layer.interactive is False layer.mode = 'direct' assert layer.mode == 'direct' assert layer.interactive is False layer.mode = 'vertex_insert' assert layer.mode == 'vertex_insert' assert layer.interactive is False layer.mode = 'vertex_remove' assert layer.mode == 'vertex_remove' assert layer.interactive is False layer.mode = 'add_rectangle' assert layer.mode == 'add_rectangle' assert layer.interactive is False layer.mode = 'add_ellipse' assert layer.mode == 'add_ellipse' assert layer.interactive is False layer.mode = 'add_line' assert layer.mode == 'add_line' assert layer.interactive is False layer.mode = 'add_path' assert layer.mode == 'add_path' assert layer.interactive is False layer.mode = 'add_polygon' assert layer.mode == 'add_polygon' assert layer.interactive is False layer.mode = 'pan_zoom' assert layer.mode == 'pan_zoom' assert layer.interactive is True def test_name(): """Test setting layer name.""" np.random.seed(0) data = 20 * np.random.random((10, 4, 2)) layer = Shapes(data) assert layer.name == 'Shapes' layer = Shapes(data, name='random') assert layer.name == 'random' layer.name = 'shps' assert layer.name == 'shps' def test_visiblity(): """Test setting layer visibility.""" np.random.seed(0) data = 20 * np.random.random((10, 4, 2)) layer = Shapes(data) assert layer.visible is True layer.visible = False assert layer.visible is False layer = Shapes(data, visible=False) assert layer.visible is False layer.visible = True assert layer.visible is True def test_opacity(): """Test setting opacity.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) # Check default opacity value of 0.7 assert 
layer.opacity == 0.7 # Select data and change opacity of selection layer.selected_data = {0, 1} assert layer.opacity == 0.7 layer.opacity = 0.5 assert layer.opacity == 0.5 # Add new shape and test its width new_shape = np.random.random((1, 4, 2)) layer.selected_data = set() layer.add(new_shape) assert layer.opacity == 0.5 # Instantiate with custom opacity layer2 = Shapes(data, opacity=0.2) assert layer2.opacity == 0.2 # Check removing data shouldn't change opacity layer2.selected_data = {0, 2} layer2.remove_selected() assert len(layer2.data) == shape[0] - 2 assert layer2.opacity == 0.2 def test_blending(): """Test setting layer blending.""" np.random.seed(0) data = 20 * np.random.random((10, 4, 2)) layer = Shapes(data) assert layer.blending == 'translucent' layer.blending = 'additive' assert layer.blending == 'additive' layer = Shapes(data, blending='additive') assert layer.blending == 'additive' layer.blending = 'opaque' assert layer.blending == 'opaque' @pytest.mark.filterwarnings("ignore:elementwise comparison fail:FutureWarning") @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_switch_color_mode(attribute): """Test switching between color modes""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) # create a continuous property with a known value in the last element continuous_prop = np.random.random((shape[0],)) continuous_prop[-1] = 1 properties = { 'shape_truthiness': continuous_prop, 'shape_type': _make_cycled_properties(['A', 'B'], shape[0]), } initial_color = [1, 0, 0, 1] color_cycle = ['red', 'blue'] color_kwarg = f'{attribute}_color' colormap_kwarg = f'{attribute}_colormap' color_cycle_kwarg = f'{attribute}_color_cycle' args = { color_kwarg: initial_color, colormap_kwarg: 'gray', color_cycle_kwarg: color_cycle, } layer = Shapes(data, properties=properties, **args) layer_color_mode = getattr(layer, f'{attribute}_color_mode') layer_color = getattr(layer, f'{attribute}_color') assert layer_color_mode == 'direct' 
np.testing.assert_allclose( layer_color, np.repeat([initial_color], shape[0], axis=0) ) # there should not be an edge_color_property color_property = getattr(layer, f'_{attribute}_color_property') assert color_property == '' # transitioning to colormap should raise a warning # because there isn't an edge color property yet and # the first property in shapes.properties is being automatically selected with pytest.warns(UserWarning): setattr(layer, f'{attribute}_color_mode', 'colormap') color_property = getattr(layer, f'_{attribute}_color_property') assert color_property == next(iter(properties)) layer_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(layer_color[-1], [1, 1, 1, 1]) # switch to color cycle setattr(layer, f'{attribute}_color_mode', 'cycle') setattr(layer, f'{attribute}_color', 'shape_type') color = getattr(layer, f'{attribute}_color') layer_color = transform_color(color_cycle * int(shape[0] / 2)) np.testing.assert_allclose(color, layer_color) # switch back to direct, edge_colors shouldn't change setattr(layer, f'{attribute}_color_mode', 'direct') new_edge_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(new_edge_color, color) @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_color_direct(attribute: str): """Test setting face/edge color directly.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer_kwargs = {f'{attribute}_color': 'black'} layer = Shapes(data, **layer_kwargs) color_array = transform_color(['black'] * shape[0]) current_color = getattr(layer, f'current_{attribute}_color') layer_color = getattr(layer, f'{attribute}_color') assert current_color == 'black' assert len(layer.edge_color) == shape[0] np.testing.assert_allclose(color_array, layer_color) # With no data selected changing color has no effect setattr(layer, f'current_{attribute}_color', 'blue') current_color = getattr(layer, f'current_{attribute}_color') assert current_color == 'blue' 
np.testing.assert_allclose(color_array, layer_color) # Select data and change edge color of selection selected_data = {0, 1} layer.selected_data = {0, 1} current_color = getattr(layer, f'current_{attribute}_color') assert current_color == 'black' setattr(layer, f'current_{attribute}_color', 'green') colorarray_green = transform_color(['green'] * len(layer.selected_data)) color_array[list(selected_data)] = colorarray_green layer_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(color_array, layer_color) # Add new shape and test its color new_shape = np.random.random((1, 4, 2)) layer.selected_data = set() setattr(layer, f'current_{attribute}_color', 'blue') layer.add(new_shape) color_array = np.vstack([color_array, transform_color('blue')]) layer_color = getattr(layer, f'{attribute}_color') assert len(layer_color) == shape[0] + 1 np.testing.assert_allclose(color_array, layer_color) # Check removing data adjusts colors correctly layer.selected_data = {0, 2} layer.remove_selected() assert len(layer.data) == shape[0] - 1 layer_color = getattr(layer, f'{attribute}_color') assert len(layer_color) == shape[0] - 1 np.testing.assert_allclose( layer_color, np.vstack((color_array[1], color_array[3:])), ) # set the color directly setattr(layer, f'{attribute}_color', 'black') color_array = np.tile([[0, 0, 0, 1]], (len(layer.data), 1)) layer_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(color_array, layer_color) @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_single_shape_properties(attribute): """Test creating single shape with properties""" shape = (4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer_kwargs = {f'{attribute}_color': 'red'} layer = Shapes(data, **layer_kwargs) layer_color = getattr(layer, f'{attribute}_color') assert len(layer_color) == 1 np.testing.assert_allclose([1, 0, 0, 1], layer_color[0]) color_cycle_str = ['red', 'blue'] color_cycle_rgb = [[1, 0, 0], [0, 0, 1]] color_cycle_rgba 
= [[1, 0, 0, 1], [0, 0, 1, 1]] @pytest.mark.parametrize("attribute", ['edge', 'face']) @pytest.mark.parametrize( "color_cycle", [color_cycle_str, color_cycle_rgb, color_cycle_rgba], ) def test_color_cycle(attribute, color_cycle): """Test setting edge/face color with a color cycle list""" # create Shapes using list color cycle shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])} shapes_kwargs = { 'properties': properties, f'{attribute}_color': 'shape_type', f'{attribute}_color_cycle': color_cycle, } layer = Shapes(data, **shapes_kwargs) assert layer.properties == properties color_array = transform_color( list(islice(cycle(color_cycle), 0, shape[0])) ) layer_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(layer_color, color_array) # Add new shape and test its color new_shape = np.random.random((1, 4, 2)) layer.selected_data = {0} layer.add(new_shape) layer_color = getattr(layer, f'{attribute}_color') assert len(layer_color) == shape[0] + 1 np.testing.assert_allclose( layer_color, np.vstack((color_array, transform_color('red'))), ) # Check removing data adjusts colors correctly layer.selected_data = {0, 2} layer.remove_selected() assert len(layer.data) == shape[0] - 1 layer_color = getattr(layer, f'{attribute}_color') assert len(layer_color) == shape[0] - 1 np.testing.assert_allclose( layer_color, np.vstack((color_array[1], color_array[3:], transform_color('red'))), ) # refresh colors layer.refresh_colors(update_color_mapping=True) # test adding a shape with a new property value layer.selected_data = {} current_properties = layer.current_properties current_properties['shape_type'] = np.array(['new']) layer.current_properties = current_properties new_shape_2 = np.random.random((1, 4, 2)) layer.add(new_shape_2) color_cycle_map = getattr(layer, f'{attribute}_color_cycle_map') assert 'new' in color_cycle_map np.testing.assert_allclose( 
color_cycle_map['new'], np.squeeze(transform_color(color_cycle[0])) ) @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_add_color_cycle_to_empty_layer(attribute): """Test adding a shape to an empty layer when edge/face color is a color cycle See: https://github.com/napari/napari/pull/1069 """ default_properties = {'shape_type': np.array(['A'])} color_cycle = ['red', 'blue'] shapes_kwargs = { 'properties': default_properties, f'{attribute}_color': 'shape_type', f'{attribute}_color_cycle': color_cycle, } layer = Shapes(**shapes_kwargs) # verify the current_edge_color is correct expected_color = transform_color(color_cycle[0]) current_color = getattr(layer, f'_current_{attribute}_color') np.testing.assert_allclose(current_color, expected_color) # add a shape np.random.seed(0) new_shape = 20 * np.random.random((1, 4, 2)) layer.add(new_shape) props = {'shape_type': np.array(['A'])} expected_color = np.array([[1, 0, 0, 1]]) np.testing.assert_equal(layer.properties, props) attribute_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(attribute_color, expected_color) # add a shape with a new property layer.selected_data = [] layer.current_properties = {'shape_type': np.array(['B'])} new_shape_2 = 20 * np.random.random((1, 4, 2)) layer.add(new_shape_2) new_color = np.array([0, 0, 1, 1]) expected_color = np.vstack((expected_color, new_color)) new_properties = {'shape_type': np.array(['A', 'B'])} attribute_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(attribute_color, expected_color) np.testing.assert_equal(layer.properties, new_properties) @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_adding_value_color_cycle(attribute): """Test that adding values to properties used to set a color cycle and then calling Shapes.refresh_colors() performs the update and adds the new value to the face/edge_color_cycle_map. 
""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])} color_cycle = ['red', 'blue'] shapes_kwargs = { 'properties': properties, f'{attribute}_color': 'shape_type', f'{attribute}_color_cycle': color_cycle, } layer = Shapes(data, **shapes_kwargs) # make shape 0 shape_type C shape_types = layer.properties['shape_type'] shape_types[0] = 'C' layer.properties['shape_type'] = shape_types layer.refresh_colors(update_color_mapping=False) color_cycle_map = getattr(layer, f'{attribute}_color_cycle_map') color_map_keys = [*color_cycle_map] assert 'C' in color_map_keys @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_color_colormap(attribute): """Test setting edge/face color with a colormap""" # create Shapes using with a colormap shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': _make_cycled_properties([0, 1.5], shape[0])} shapes_kwargs = { 'properties': properties, f'{attribute}_color': 'shape_type', f'{attribute}_colormap': 'gray', } layer = Shapes(data, **shapes_kwargs) assert layer.properties == properties color_mode = getattr(layer, f'{attribute}_color_mode') assert color_mode == 'colormap' color_array = transform_color(['black', 'white'] * int(shape[0] / 2)) attribute_color = getattr(layer, f'{attribute}_color') assert np.all(attribute_color == color_array) # change the color cycle - face_color should not change setattr(layer, f'{attribute}_color_cycle', ['red', 'blue']) attribute_color = getattr(layer, f'{attribute}_color') assert np.all(attribute_color == color_array) # Add new shape and test its color new_shape = np.random.random((1, 4, 2)) layer.selected_data = {0} layer.add(new_shape) attribute_color = getattr(layer, f'{attribute}_color') assert len(attribute_color) == shape[0] + 1 np.testing.assert_allclose( attribute_color, np.vstack((color_array, transform_color('black'))), ) # Check removing data 
adjusts colors correctly layer.selected_data = {0, 2} layer.remove_selected() assert len(layer.data) == shape[0] - 1 attribute_color = getattr(layer, f'{attribute}_color') assert len(attribute_color) == shape[0] - 1 np.testing.assert_allclose( attribute_color, np.vstack( ( color_array[1], color_array[3:], transform_color('black'), ) ), ) # adjust the clims setattr(layer, f'{attribute}_contrast_limits', (0, 3)) layer.refresh_colors(update_color_mapping=False) attribute_color = getattr(layer, f'{attribute}_color') np.testing.assert_allclose(attribute_color[-2], [0.5, 0.5, 0.5, 1]) # change the colormap new_colormap = 'viridis' setattr(layer, f'{attribute}_colormap', new_colormap) attribute_colormap = getattr(layer, f'{attribute}_colormap') assert attribute_colormap.name == new_colormap @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_colormap_without_properties(attribute): """Setting the colormode to colormap should raise an exception""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) with pytest.raises(ValueError): setattr(layer, f'{attribute}_color_mode', 'colormap') @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_colormap_with_categorical_properties(attribute): """Setting the colormode to colormap should raise an exception""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])} layer = Shapes(data, properties=properties) with pytest.raises(TypeError): with pytest.warns(UserWarning): setattr(layer, f'{attribute}_color_mode', 'colormap') @pytest.mark.parametrize("attribute", ['edge', 'face']) def test_add_colormap(attribute): """Test directly adding a vispy Colormap object""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) annotations = {'shape_type': _make_cycled_properties([0, 1.5], shape[0])} color_kwarg = f'{attribute}_color' colormap_kwarg = f'{attribute}_colormap' 
args = {color_kwarg: 'shape_type', colormap_kwarg: 'viridis'} layer = Shapes(data, properties=annotations, **args) setattr(layer, f'{attribute}_colormap', 'gray') layer_colormap = getattr(layer, f'{attribute}_colormap') assert layer_colormap.name == 'gray' def test_edge_width(): """Test setting edge width.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.current_edge_width == 1 assert len(layer.edge_width) == shape[0] assert layer.edge_width == [1] * shape[0] # With no data selected changing edge width has no effect layer.current_edge_width = 2 assert layer.current_edge_width == 2 assert layer.edge_width == [1] * shape[0] # Select data and change edge color of selection layer.selected_data = {0, 1} assert layer.current_edge_width == 1 layer.current_edge_width = 3 assert layer.edge_width == [3] * 2 + [1] * (shape[0] - 2) # Add new shape and test its width new_shape = np.random.random((1, 4, 2)) layer.selected_data = set() layer.current_edge_width = 4 layer.add(new_shape) assert layer.edge_width == [3] * 2 + [1] * (shape[0] - 2) + [4] # Instantiate with custom edge width layer = Shapes(data, edge_width=5) assert layer.current_edge_width == 5 # Instantiate with custom edge width list width_list = [2, 3] * 5 layer = Shapes(data, edge_width=width_list) assert layer.current_edge_width == 1 assert layer.edge_width == width_list # Add new shape and test its color layer.current_edge_width = 4 layer.add(new_shape) assert len(layer.edge_width) == shape[0] + 1 assert layer.edge_width == width_list + [4] # Check removing data adjusts colors correctly layer.selected_data = {0, 2} layer.remove_selected() assert len(layer.data) == shape[0] - 1 assert len(layer.edge_width) == shape[0] - 1 assert layer.edge_width == [width_list[1]] + width_list[3:] + [4] # Test setting edge width with number layer.edge_width = 4 assert all([width == 4 for width in layer.edge_width]) # Test setting edge width with list new_widths = [2] * 5 
+ [3] * 4 layer.edge_width = new_widths assert layer.edge_width == new_widths # Test setting with incorrect size list throws error new_widths = [2, 3] with pytest.raises(ValueError): layer.edge_width = new_widths def test_z_index(): """Test setting z-index during instantiation.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.z_index == [0] * shape[0] # Instantiate with custom z-index layer = Shapes(data, z_index=4) assert layer.z_index == [4] * shape[0] # Instantiate with custom z-index list z_index_list = [2, 3] * 5 layer = Shapes(data, z_index=z_index_list) assert layer.z_index == z_index_list # Add new shape and its z-index new_shape = np.random.random((1, 4, 2)) layer.add(new_shape) assert len(layer.z_index) == shape[0] + 1 assert layer.z_index == z_index_list + [4] # Check removing data adjusts colors correctly layer.selected_data = {0, 2} layer.remove_selected() assert len(layer.data) == shape[0] - 1 assert len(layer.z_index) == shape[0] - 1 assert layer.z_index == [z_index_list[1]] + z_index_list[3:] + [4] # Test setting index with number layer.z_index = 4 assert all([idx == 4 for idx in layer.z_index]) # Test setting index with list new_z_indices = [2] * 5 + [3] * 4 layer.z_index = new_z_indices assert layer.z_index == new_z_indices # Test setting with incorrect size list throws error new_z_indices = [2, 3] with pytest.raises(ValueError): layer.z_index = new_z_indices def test_move_to_front(): """Test moving shapes to front.""" shape = (10, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) z_index_list = [2, 3] * 5 layer = Shapes(data, z_index=z_index_list) assert layer.z_index == z_index_list # Move selected shapes to front layer.selected_data = {0, 2} layer.move_to_front() assert layer.z_index == [4] + [z_index_list[1]] + [4] + z_index_list[3:] def test_move_to_back(): """Test moving shapes to back.""" shape = (10, 4, 2) np.random.seed(0) data = 20 *
np.random.random(shape)
numpy.random.random
#https://github.com/tommyfms2/pix2pix-keras-byt #http://toxweblog.toxbe.com/2017/12/24/keras-%e3%81%a7-pix2pix-%e3%82%92%e5%ae%9f%e8%a3%85/ #tommyfms2/pix2pix-keras-byt より import os import argparse import numpy as np import h5py import time import matplotlib as mpl mpl.use('Agg') import matplotlib.pylab as plt from keras.preprocessing.image import load_img, img_to_array import keras.backend as K from keras.utils import generic_utils from keras.optimizers import Adam, SGD import models def my_normalization(X): return X / 127.5 - 1 def my_inverse_normalization(X): return (X + 1.) / 2. def to3d(X): if X.shape[-1]==3: return X b = X.transpose(3,1,2,0) c = np.array([b[0],b[0],b[0]]) return c.transpose(3,1,2,0) def plot_generated_batch(X_proc, X_raw, generator_model, batch_size, suffix): X_gen = generator_model.predict(X_raw) X_raw = my_inverse_normalization(X_raw) X_proc = my_inverse_normalization(X_proc) #超えた絵合わせのため、解画像 X_gen = my_inverse_normalization(X_gen) Xs = to3d(X_raw[:10]) Xg = to3d(X_gen[:10]) Xr = to3d(X_proc[:10]) Xs = np.concatenate(Xs, axis=1) Xg = np.concatenate(Xg, axis=1) Xr =
np.concatenate(Xr, axis=1)
numpy.concatenate
''' Map from the ACA catalogue to the IRAM catalogue from Corbelli+2017 We'll also determine which clouds have 13CO detected and their evolutionary phase ''' from astropy.table import Table from astropy.io import fits import os import astropy.units as u from astropy.coordinates import SkyCoord from spectral_cube import SpectralCube import numpy as np # data_path = os.path.expanduser("~/storage/M33/") data_path = os.path.expanduser("~/bigdata/ekoch/M33/") aca_path = f"{data_path}/ALMA/ACA_Band6/" corbelli_table = Table.read(f"{data_path}/Corbelli_17_catalogues/J_A+A_601_A146_table5.dat.fits") aca_table = Table.read(f"{aca_path}/cprops_12CO21/M33_ACA_12CO21_0p7kms_fullmosaic_roundbeam.image_K_M33_co21_m33_props.fits") # The beam is ~12 arcsec. We're going to require matched clouds be within # 1.5 beams max_sep = 12 * u.arcsec * 1.5 iram_cloud_coords = SkyCoord(corbelli_table['RAdeg'], corbelli_table['DEdeg'], frame='icrs') dist_matrix = np.zeros((len(aca_table), len(corbelli_table))) * u.deg for idx in range(len(aca_table)): cloud_coord = SkyCoord(aca_table['XCTR_DEG'][idx] * u.deg, aca_table['YCTR_DEG'][idx] * u.deg, frame='icrs') dist_matrix[idx] = cloud_coord.separation(iram_cloud_coords) # Match the clouds. Assume that each ACA cloud is associated with 0 or 1 # IRAM clouds mapping_dict = {} iram_cloud_index = np.arange(len(corbelli_table)) for idx in range(len(aca_table)): mapping_dict[idx] = [] matches =
np.where(dist_matrix[idx] < max_sep)
numpy.where
import unittest import matplotlib.pyplot as plt import numpy as np from vaws.model.curve import vulnerability_weibull, vulnerability_weibull_pdf def single_exponential_given_V(beta_, alpha_, x_arr): """ compute Args: beta_: parameter for vulnerability curve alpha_: parameter for vulnerability curve x_arr: 3sec gust wind speed at 10m height Returns: damage index """ exponent_ = -1.0 * np.power(x_arr / np.exp(beta_), 1.0 / alpha_) return 1 -
np.exp(exponent_)
numpy.exp
import os import cv2 import random import numpy as np from Augmenter import utils class BaseAugmenter(object): """ Parent class for all object types in the image that can be augmented """ def __init__(self, image, label, class_id, placement_id=None, horizon_line=None, max_height=None, max_iou=0.4, padding=10, min_px=10, sigma=0): """ Constructor image: image to be augmented label: semantic label to be modified class_id: BGR value of object to be copied into the image placement_id: possible locations for the object to be placed horizon_line: location of the horizon for scaling accurately max_height: size of the object if it were copied in an area closest to the camera max_iou: maximum overlap allowed between objects of same class padding: padding applied around roi for optimal blurring min_px: number of pixels tall the scaled object should be to consider it a valid copy paste sigma: increase/decrease the value to decrease/increase the scaling ratio """ self.called = 0 self.counter = 0 self.limits = None self.sigma = sigma self.max_iou = max_iou self.padding = padding self.min_px = min_px self.rows, self.cols, _ = image.shape self.image = image.copy() self.label = label.copy() self.class_id = class_id self.fake_class_id = [i if i == 255 else i + 1 for i in class_id] self.placement_id = placement_id self.horizon_line = horizon_line self.max_height = max_height if self.max_height is None: self.max_height = self.rows * 0.8 if placement_id is not None: self.row_value, self.col_value = utils.threshold(image, label, placement_id) else: self.row_value, self.col_value = np.mgrid[0:len(range(self.rows)), 0:len(range(self.cols))] self.row_value, self.col_value = self.row_value.ravel(), self.col_value() if self.horizon_line is not None: self.col_value = self.col_value[self.row_value - self.horizon_line > 0] self.row_value = self.row_value[self.row_value - self.horizon_line > 0] # Initialize scaling triangle # pt1 # . # pt2 . . 
pt3 # pt1 = main_triangle_side = (horizon_line, cols / 2) # pt2 = (rows, 0) self.main_triangle_side = np.sqrt(np.power(self.horizon_line - self.rows, 2) + np.power(self.cols / 2, 2)) self.slope = float(self.horizon_line - self.rows) / (self.cols / 2) self.y_intercept = self.rows self.copy_row_value = self.row_value self.copy_col_value = self.col_value self.class_placement = utils.get_class_pos(self.label, self.class_id) def set_limit(self, limit): """ Filters the placement array to constrain the number of augmented pixels per image. limit = (lower_percent, higher_percent) percentage of the total image height requested """ assert self.horizon_line is not None, "Can't call set_limit without setting a horizon line!" self.limits = limit self.col_value = self.copy_col_value self.row_value = self.copy_row_value min_scaled_class_height, max_scaled_class_height = np.array(limit) * self.rows min_ratio = float(min_scaled_class_height) / self.max_height max_ratio = float(max_scaled_class_height) / self.max_height min_cur_triangle_side = min_ratio * (self.main_triangle_side + self.sigma) max_cur_triangle_side = max_ratio * (self.main_triangle_side + self.sigma) y_min = (min_cur_triangle_side * (self.rows - self.horizon_line) / self.main_triangle_side + self.horizon_line) y_max = (max_cur_triangle_side * (self.rows - self.horizon_line) / self.main_triangle_side + self.horizon_line) self.col_value = self.col_value[np.logical_and(self.row_value > y_min, self.row_value < y_max)] self.row_value = self.row_value[
np.logical_and(self.row_value > y_min, self.row_value < y_max)
numpy.logical_and
from dataapi import SGS from bloomberg import BBG import numpy as np import pandas as pd from sklearn import preprocessing getdata = SGS() bbg = BBG() start_date = pd.to_datetime("01-01-2001") end_date = pd.to_datetime("07-01-2019") #fetching Brazil FGV Consumer Confidence Index SA Sep 2005=100 Original Date: '30-sep-2005' df = bbg.fetch_series(securities=['BZFGCCSA Index'], fields=['PX_LAST'], startdate=start_date, enddate=end_date) consbr = pd.DataFrame(data=df) consbr = consbr.droplevel(0) consbr = consbr.reset_index() consbr = consbr.set_index('TRADE_DATE') consbr = consbr.resample('Q').mean() # Normalized series Consumer Confidence x = np.array(consbr['BZFGCCSA Index']) x = x.reshape(-1,1) min_max_scaler = preprocessing.MinMaxScaler() x_scaled = min_max_scaler.fit_transform(x) confnorm = consbr confnorm['BZFGCCSA Normalized'] = '' confnorm['BZFGCCSA Normalized'] = x_scaled confnorm = confnorm.drop('BZFGCCSA Index', axis=1) #fetching GDP Growth in R$ df_gr = pd.DataFrame(getdata.fetch("1207",start_date, end_date)) #for GDP in dollars, change the string to 7324 df_gr = df_gr['1207'].resample('Q').mean() df_gr = df_gr.pct_change(4) df_gr = df_gr.dropna() #normalizing GDP x = df_gr.values x = x.reshape(-1,1) min_max_scaler = preprocessing.MinMaxScaler() x_scaled = min_max_scaler.fit_transform(x) df_gr_norm = pd.DataFrame(x_scaled, index=df_gr.index, columns=['GDP Growth Normalized']) #fetching real earnings df_realear = getdata.fetch("10790",start_date, end_date) df_realear = df_realear['10790'].resample("Q").mean() df_realear = df_realear.pct_change(4) df_realear = df_realear.dropna() #normalizing real earnings a = df_realear.values a = a.reshape(-1,1) a_scaled = min_max_scaler.fit_transform(a) df_realear_norm = pd.DataFrame(a_scaled, index=df_realear.index, columns=['Real Earnings Normalized']) #Merging Real Growth DataFrames# df_realgr = pd.merge(df_gr_norm,df_realear_norm, on='Date', how='outer') df_realgr = pd.merge(df_realgr,confnorm, on='Date', how='outer') 
#adding a column with average df_realgr["Real Growth"] = df_realgr.mean(numeric_only = True, axis=1) # VOLATILITY SERIES start_date = pd.to_datetime('01-jan-2010') end_date = pd.to_datetime('today') df = bbg.fetch_series(securities=['IBOV Index', 'GEBR10Y Index'], fields=['VOLATILITY_90D', 'Volatil 90D'], startdate=start_date, enddate=end_date) volIBOV_90 = pd.DataFrame(data=df['IBOV Index']) volIBOV_90 = volIBOV_90.droplevel('FIELD') volIBOV_90 = volIBOV_90.resample('Q').last() voltitul_90 = pd.DataFrame(data=df['GEBR10Y Index']) voltitul_90 = voltitul_90.droplevel('FIELD') voltitul_90 = voltitul_90.resample('Q').last() # Normalized series IBOV and BR Bonds (titulos) x = np.array(volIBOV_90['IBOV Index']) x = x.reshape(-1,1) ibovnorm = min_max_scaler.fit_transform (x) y = np.array(voltitul_90['GEBR10Y Index']) y = y.reshape(-1,1) titulnorm = min_max_scaler.fit_transform(y) volBR = volIBOV_90 volBR['IBOV Index Normalized'] = '' volBR['IBOV Index Normalized'] = ibovnorm volBR['Titulos Vol. Normalized'] = '' volBR['Titulos Vol. Normalized'] = titulnorm volBR = volBR.drop('IBOV Index', axis=1) # Average Volatility AvgVolBR = volBR AvgVolBR['Avg Vol Br'] = '' AvgVolBR['Avg Vol BR'] = volBR.mean(numeric_only = True, axis=1) AvgVolBR = AvgVolBR.drop('Titulos Vol. 
Normalized', axis=1) #fetching IPCA df_cpi = pd.DataFrame(getdata.fetch("433", start_date, end_date)) df_cpi = pd.DataFrame(df_cpi["433"].resample('Q').mean()) #normalizing IPCA b = df_cpi.values b = b.reshape(-1, 1) min_max_scaler = preprocessing.MinMaxScaler() b_scaled = min_max_scaler.fit_transform(b) df_cpi_norm = pd.DataFrame(b_scaled, index=df_cpi.index, columns=['CPI Normalized']) #IPCA growth df_cpigr = df_cpi.pct_change(4) df_cpigr = df_cpigr.dropna() df_cpigr = df_cpigr['433'].resample('Q').mean() #normalizing IPCA growth c = df_cpigr.values c = c.reshape(-1, 1) c_scaled = min_max_scaler.fit_transform(c) df_cpigr_norm = pd.DataFrame(c_scaled, index=df_cpigr.index, columns=['CPI Growth Normalized']) #fetching GDP deflator df_gdpdef = pd.DataFrame(getdata.fetch("1211")) df_gdpdef = df_gdpdef.dropna() #normalizing GDP deflator d = df_gdpdef.values d = d.reshape(-1, 1) d_scaled = min_max_scaler.fit_transform(d) df_gdpdef_norm = pd.DataFrame(d_scaled, index=df_gdpdef.index, columns=['GDP Deflator Normalized']) #Merging DataFrames# df_inf = pd.merge(df_cpi_norm,df_cpigr_norm, on='Date', how='outer') df_inf = pd.merge(df_inf,df_gdpdef_norm, on='Date', how='outer') #calculating average inflation df_inf["inflation"] = df_inf.mean(numeric_only=True, axis=1) # MERGING GROWTH, VOLATILITY AND INFLATION DATAFRAMES df_growth = df_realgr["Real Growth"] df_vol = AvgVolBR['Avg Vol BR'] df_series = df_inf["inflation"] df_series = pd.merge(df_series, df_growth, on="Date", how="outer") df_series = pd.merge(df_series, df_vol, on="Date", how="outer") df_series = df_series.apply(zscore) print(df_series) # Data Classification clas_df = df_series clas_df['Inflation Cycle'] = np.where(clas_df['inflation']>0, "Inflationary", "Disinflationary") clas_df['Growth Cycle'] = np.where(clas_df['Real Growth']>0, "Boom", "Stagnation") clas_df['Cycle']= clas_df['Inflation Cycle'] + ' ' + clas_df['Growth Cycle'] clas_df['infcy'] = np.where(clas_df['inflation']>0, 1, 0) clas_df['cy'] =
np.where(clas_df['Real Growth']>0, 2, 0)
numpy.where
# -*- coding: utf-8 -*- ''' ------------------------------------------------------------------------------------------------- This code accompanies the paper titled "Human injury-based safety decision of automated vehicles" Author: <NAME>, <NAME>, <NAME>, <NAME> Corresponding author: <NAME> (<EMAIL>) ------------------------------------------------------------------------------------------------- ''' import cv2 import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg from matplotlib.offsetbox import OffsetImage, AnnotationBbox def resize_rotate(image, angle, l_, w_): ''' resize and rotate the figure. ''' image = cv2.resize(image, (image.shape[1], int(image.shape[0] / (3370 / 8651) * (w_ / l_)))) # grab the dimensions of the image and then determine the center. (h, w) = image.shape[:2] (cX, cY) = (w // 2, h // 2) # grab the rotation matrix and the sine and cosine. M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0) cos =
np.abs(M[0, 0])
numpy.abs
""" Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>) Copyright © 2021, United States Government, as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved. The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import annotations import numpy as np def is_dm(rho: np.ndarray, atol=1e-6) -> bool: """ check if the given input a valid density matrix. """ rho = np.asarray(rho) d = int(np.sqrt(np.prod(rho.shape))) rho_full = np.reshape(rho, (d, d)) hc = np.allclose(rho_full, rho_full.T.conj(), atol=atol) tp = np.isclose(np.trace(rho_full), 1, atol=atol) apprx_gtr = lambda y, x: np.real(y) >= x or np.isclose(y, x, atol=atol) ev = np.linalg.eigvals(rho_full) psd = np.all([apprx_gtr(e, 0) for e in ev]) return (hc and tp and psd) def ptrace(state: np.ndarray, keep: {int, list[int]}, dims: {int, list[int]} = None) -> np.ndarray: """ compute the partial trace of a pure state (vector) or density matrix. state: np.array One dimensional for pure state e.g. np.array([1,0,0,0]) or two dimensional for density matrix e.g. np.array([[1,0],[0,0]]) keep: list of int the qubits we want to keep (all others traced out). Can also specify a single int if only keeping one qubit. dims: list of int, optional List of qudit dimensions respecting the ordering of `state`. Number of qubits is `len(dims)`, and full Hilbert space dimension is `product(dims)`. If unspecified, assumes 2 for all. 
Returns the density matrix of the remaining qubits. """ state = np.asarray(state) if len(state.shape) not in (1, 2): raise ValueError('should be pure state (one dimensional) ' 'or density matrix (two dimensional). ' f'Received dimension {len(state.shape)}') # pure state or not pure = len(state.shape) == 1 if not pure and state.shape[0] != state.shape[1]: raise ValueError('invalid state input.') full_dim = np.prod(state.shape[0]) if dims is not None and full_dim != np.prod(dims): raise ValueError('specified dimensions inconsistent with state') n_qubits = np.log2(full_dim) if dims is None else len(dims) if np.isclose(n_qubits, round(n_qubits)): n_qubits = int(round(n_qubits)) else: raise ValueError('invalid state size') keep = [keep] if isinstance(keep, int) else list(keep) if not np.all([q in range(n_qubits) for q in keep]) or len(keep) >= n_qubits: raise ValueError('invalid axes') if dims is None: dims = [2] * n_qubits # dimensions of qubits we keep final_dims = [dims[i] for i in keep] final_dim = np.prod(final_dims) # dimensions to trace out drop_dim = int(round(full_dim / final_dim)) if pure: state = state.reshape(dims) perm = keep + [q for q in range(n_qubits) if q not in keep] state = np.transpose(state, perm).reshape(final_dim, drop_dim) return np.einsum('ij,kj->ik', state, state.conj()) else: # now we have to redefine things in case of a density matrix # basically we double the sizes density_dims = dims + dims keep += [q + n_qubits for q in keep] perm = keep + [q for q in range(2 * n_qubits) if q not in keep] state = state.reshape(density_dims) state = np.transpose(state, perm) state = state.reshape((final_dim, final_dim, drop_dim, drop_dim)) return np.einsum('ijkk->ij', state) def is_channel(channel: SuperGate, atol=1e-8, order: tuple[any, ...] = None, **kwargs) -> bool: """ Checks using the Choi matrix whether or not `channel` defines a valid quantum channel. That is, we check it is a valid CPTP map. 
Parameters ---------- channel: MatrixSuperGate or KrausSuperGate Must have the method 'map()'. atol: float, optional absolute tolerance to use for determining channel is CPTP. order: tuple[any, ...], optional If provided, Kraus' map is ordered accordingly to `order`. See `MatrixChannel.map()` kwargs: kwargs for `MatrixChannel.map()` """ C = choi_matrix(channel, order, **kwargs) dim = _channel_dim(channel) # trace preserving tp = np.isclose(C.trace(), dim, atol=atol) # hermiticity preserving hp = np.allclose(C, C.conj().T, atol=atol) # completely positive apprx_gtr = lambda e, x: np.real(e) >= x or np.isclose(e, x, atol=atol) cp = np.all([ apprx_gtr(e, 0) and np.isclose(np.imag(e), 0, atol=atol) for e in np.linalg.eigvals(C) ]) return tp and hp and cp def choi_matrix(channel: SuperGate, order: tuple[any, ...] = None, **kwargs) -> np.ndarray: """ return the Choi matrix for channel, of shape (d**2, d**2) for a d-dimensional Hilbert space. The channel can be applied as: Lambda(rho) = Tr_0[ (I \otimes rho^T) C] where C is the Choi matrix. Parameters ---------- channel: MatrixSuperGate or KrausSuperGate Must have the method 'map()'. order: tuple[any, ...], optional If provided, Kraus' map is ordered accordingly to `order`. See `MatrixChannel.map()` kwargs: kwargs for `MatrixChannel.map()` """ if not hasattr(channel, 'map'): raise ValueError("'channel' must have method 'map()'") op = channel.map(order, **kwargs) d = _channel_dim(channel) C = np.zeros((d**2, d**2), dtype=complex) for ij in range(d**2): Eij =
np.zeros(d**2)
numpy.zeros
""" Trainer code for 2D and 3D Noise2Void (https://arxiv.org/abs/1811.10980) Adapted from https://github.com/juglab/pn2v/blob/master/pn2v/training.py, ported from NumPy to PyTorch and generalized to support 3D. """ from typing import Callable import torch from torch import nn import numpy as np import itertools from scipy.ndimage.filters import gaussian_filter from tqdm import tqdm from elektronn3.training.trainer import Trainer, NaNException from elektronn3.modules.loss import MaskedMSELoss import logging logger = logging.getLogger('elektronn3log') @torch.no_grad() def get_stratified_coords(ratio, shape): """ Produce a list of approx. ``num_pix`` random coordinates, sampled from ``shape`` using startified sampling. Supports n-dimensional shapes. """ # total_num = torch.prod(shape).to(torch.float32) # sample_num = total_num * ratio ratio = torch.as_tensor(ratio) ndim = len(shape) shape = torch.as_tensor(shape, dtype=torch.int32) box_size = int(torch.round(torch.sqrt(1. / ratio))) coords = [] box_counts = torch.ceil(shape.float() / box_size).int() for steps in itertools.product(*[range(bc) for bc in box_counts]): steps = torch.as_tensor(steps, dtype=torch.int32) co = torch.randint(0, box_size, (ndim,)) + box_size * steps if torch.all(co < shape): coords.append(co) if not coords: raise ValueError(f'ratio {ratio:.1e} is too close to zero. Choose a higher value.') coords = torch.stack(coords) return coords # TODO: Is the hardcoded small ROI size sufficient? @torch.no_grad() def prepare_sample(img, ratio=1e-3, channels=None): """Prepare binary mask and target image for Noise2Void from a given image""" ndim = img.ndim - 2 # Subtract (N, C) dims if channels is None: channels = range(img.shape[1]) inp = img.clone() target = img mask = torch.zeros_like(img) for n, c in itertools.product(range(img.shape[0]), channels): hotcoords = get_stratified_coords(ratio, img[n, c].shape) maxsh = np.array(img[n, c].shape) - 1 for hc in hotcoords: roimin =
np.clip(hc - 2, 0, None)
numpy.clip
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Description ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function __author__ = "<NAME>" __copyright__ = "Copyright (C) 2019, HANDBOOK" __credits__ = ["CONG-MINH NGUYEN"] __license__ = "GPL" __version__ = "1.0.1" __date__ = "5/10/2019" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Development" # ["Prototype", "Development", or "Production"] # Project Style: https://dev.to/codemouse92/dead-simple-python-project-structure-and-imports-38c6 # Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting #============================================================================== # Imported Modules #============================================================================== import argparse from pathlib import Path import os.path import sys import time import copy os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "" # The GPU id to use, usually either "0" or "1" import json import numpy as np import cv2 import requests from Camera.OrbbecAstraS.camera import Camera, rgbd_to_pointcloud from GeneralUtils import List, Tuple, Dict, Union, Generic, TypeVar from GeneralUtils import sample_arrays, stack_list_horizontal from PointCloudUtils import visualize_pc, points_to_pc, coords_labels_to_pc, load_ply_as_pc, load_ply_as_points from PointCloudUtils import adjust_pc_coords, global_icp from PointCloudUtils import radian2degree, degree2radian, m2mm, mm2m, create_rotx_matrix, create_roty_matrix, create_rotz_matrix, create_tranl_matrix from Segmentation.PointNet.learner import PointNetLearner #============================================================================== # Constant Definitions #============================================================================== #============================================================================== # Function 
Definitions #============================================================================== def mpose2mmpose(pose: np.ndarray): tarr = np.ones(len(pose)) tarr[:3] *= 1000 return pose * tarr def mmpose2mpose(pose: np.ndarray): tarr = np.ones(len(pose)) tarr[:3] *= 0.001 return pose * tarr def load_object_models(model_path='./obj_models/modelMay10/'): """ Description: :param model_path: str, path to the reference models of known objects :return: pc_models, List[2L ndarrays], list of points of target surface :return: centroid_models, List[Vector(3 floats)], the list of centroids of model :return: pose_models, List[List[Vector(6 floats)]], the list of pose list of each model(each model has a list of poses) """ pc_models = [] centroid_models = [] pose_models = [] files = os.listdir(path=os.path.join(model_path, 'pc/')) for _, file in enumerate(files): filename, _ = os.path.splitext(file) pc_model = load_ply_as_points(file_path=os.path.join(model_path, 'pc/', file)) centroid, grasping_pose = np.load(os.path.join(model_path, 'info/', filename + '.npy'), allow_pickle=True) grasping_pose = np.array(grasping_pose).astype(float) grasping_pose[:, :3] = mm2m(grasping_pose[:, :3]) pc_models.append(pc_model) centroid_models.append(centroid) pose_models.append(grasping_pose) return pc_models, centroid_models, pose_models def measure_xtran_params(neutral_point, transformation): """ Description: Assume that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown But we know coords of a determined neutral point in 2 coord systems, hence we can measure Transl from robot centroid to camera centroid.(Step 2) :param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems :param transformation : Dict, list of 3 rotating transformations :return: r2c_xtran : Matrix 4x4 floats, transformation from robot coord to camera coord :return: c2r_xtran : Matrix 4x4 floats, transformation 
from camera coord to robot coord # :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord """ # 1: Load coords of the neutral point neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord'])) # neutral point coord in robot coord system neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord'])) # neutral point coord in camera coord system rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z # 2: Find transformation between robot coord centroid and camera coord centroid rotxyz = np.dot(np.dot(rotz, roty), rotx) # determine transformation matrix after rotate sequently around x, y, z neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system tranl = create_tranl_matrix(vector=-Oc_in_3) # 3: Find transformation matrix from robot to camera # r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx) # c2r_xtran = np.linalg.inv(r2c_xtran) return rotx, roty, rotz, tranl def input_cli(): user_input = input("Enter CLI commands such as (--NAME VALUE ...): ") custom_parser = argparse.ArgumentParser() custom_parser.add_argument('-vb', '--verbose', type=bool, help='show detail results') custom_parser.add_argument('-vs', '--voxel_size', type=float, help='adjust voxel size') custom_parser.add_argument('-ft', '--fitness_threshold', type=float, help='adjust voxel size') custom_parser.add_argument('-pi', '--selected_pose_id', type=int, help='select pose id that will execute grasp') custom_args = custom_parser.parse_args(user_input.split()) return custom_args def normalize_pc(points: np.ndarray): new_points = copy.deepcopy(points) 
new_points[:, 2] -= 0.677 new_points[:, 3:6] /= 255. return new_points def segment_obj_in_scene(scene_points, n_points: int=16384, n_channels: int=6, url='http://127.0.0.1:5000/api/'): """ Description: segment the point clouds of wrench and pipe out of scene :param learner : Object, a PointNet Learner that's able to do predict point-wise classification :param scene_points : 2L ndarray(shape=(n_points, n_channels)), list of points :param n_points : int > 0, number input points of PointNet Learner :param n_channels : int > 0, number channels of input points of PointNet Learner :return: wrench_points : 2L ndarray, points of wrench :return: pipe_points : 2L ndarray, points of pipe """ # Shuffle points to distribute the points equally in arrays(useful for next step, cut scene into parts to segment) n_scene_points = len(scene_points) scene_points = sample_arrays(arrs=scene_points, n_samples=n_scene_points) # Do segment(cut scene into 2 parts, segment each part then unify results of 2 parts to get overall picture) wrench_points = [] pipe_points = [] for i in range(2): # sample the points to fit the network cur_scene_points = scene_points[i * n_scene_points // 2:(i + 1) * n_scene_points // 2] cur_scene_points = sample_arrays(arrs=cur_scene_points, n_samples=n_points) # predict segment labels(send data to remote server through RESTful API) # pred_labels = learner.predict(x=normalize_pc(points=cur_scene_points[:, :n_channels])) data = {'points': normalize_pc(points=cur_scene_points[:, :n_channels]).tolist()} j_data = json.dumps(data) headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'} res = requests.post(url=url, data=j_data, headers=headers) pred_labels = np.asarray(json.loads(res.text)) # extract the points in the scene of each object by labels wrench_points.append(cur_scene_points[pred_labels == 2]) pipe_points.append(cur_scene_points[pred_labels == 3]) wrench_points = np.vstack(wrench_points) # get entire points of wrench pipe_points = 
np.vstack(pipe_points) # get entire points of pipe # visualize_pc(coords_labels_to_pc(coords=cur_scene_points[:, :3], labels=pred_labels)) return wrench_points, pipe_points def match_object_surface(surface: np.ndarray, model: np.ndarray, model_centroid: Tuple[float, float, float], voxel_size: float, n_channel: int=6, verbose: bool=False): """ Description: :param surface : 2L ndarray(shape=(n_points, n_channels)), list of points of target surface :param model : 2L ndarray(shape=(n_points, n_channels)), list of points of target surface :param model_centroid : Vector(3 floats), the centroid of `model` :param voxel_size : float, default=0.6, downsampling size of point cloud in `global_icp` algorithm :param n_channel : int > 0, number channels of input points of PointNet Learner :param verbose : bool, show detail results and notification or not :return: TYPE, MEAN """ point_cloud_model = adjust_pc_coords(point_cloud=points_to_pc(model[:, :n_channel]), coord=model_centroid) point_cloud_target = adjust_pc_coords(point_cloud=points_to_pc(surface[:, :n_channel]), coord=model_centroid) xtran = global_icp(source=points_to_pc(point_cloud_model), target=points_to_pc(point_cloud_target), voxel_size=voxel_size, verbose=verbose) print(xtran) return xtran def interpolate_pose(ref_pose, surf_xtran, rotx, roty, rotz, tranl, pc_centroid): """ Description: match reference_pose of (x, y, z) (rx, ry, rz) and (mode, aperture) from reference source to target point cloud :param ref_pose : Vector(8 floats), the pose of the reference model :param surf_xtran : Matrix(4x4 floats), the transformation matrix from source model to target point cloud :param rotx : Matrix(4x4 floats), the transformation matrix of rotation around x axis of robot coord :param roty : Matrix(4x4 floats), the transformation matrix of rotation around y axis of robot coord :param rotz : Matrix(4x4 floats), the transformation matrix of rotation around z axis of robot coord :param tranl : Matrix(4x4 floats), the 
transformation matrix of translation from robot origin to the camera origin :param pc_centroid : Matrix(4x4 floats), the centroid of considered point cloud :return: Vector(6 floats), the pose in robot system """ # transformation matrix of robot origin to point cloud center, xyz elements tranl2 = create_tranl_matrix(vector=-np.array(pc_centroid)) r2pc_xyz_xtran = np.dot(np.dot(np.dot(np.dot(tranl2, tranl), rotz), roty), rotx) pc2r_xyz_xtran = np.linalg.inv(r2pc_xyz_xtran) # measure xyz new_xyz = np.append(arr=ref_pose[:3], values=1, axis=None) new_xyz = np.dot(r2pc_xyz_xtran, new_xyz) new_xyz = np.dot(surf_xtran, new_xyz) new_xyz = np.dot(pc2r_xyz_xtran, new_xyz) # measure roll-pitch-yaw # new_rpy = ref_pose[3:6] + radian2degree(rotation_matrix_to_euler_angles(surf_xtran[:3, :3])) new_yaw = ref_pose[5] + radian2degree(rotation_matrix_to_euler_angles(surf_xtran[:3, :3]))[2] # restrict rx, ry because of real robot problem new_pose = copy.deepcopy(ref_pose) new_pose[:3] = new_xyz[:3] # new_pose[3:6] = new_rpy[:3] new_pose[5] = new_yaw return new_pose def rgbd_to_pointcloud1(rgb, depth, scale=0.001, focal_length_x=520, focal_length_y=513, label=False, offset=0, **kwargs): """ Convert single RGBD image to point cloud :param rgb: 3L ndarray of int, RGB image :param depth: 1L ndarray of any, depth image :param scale: a float value, scale=0.001->scale into Meter unit, scale=1->scale into miliMeter unit :param focal_length_x: a float value, focal_length of x axis :param focal_length_y: a float value, focal_length of y axis :param label: a bool value, enable or disable labeling data :param **kwargs: a list of 3L ndarray of int, list of label tables this arguments are only used when 'label' is set True size(h, w) of each label table must equal size of rgb image :return: a list of points as [X, Y, Z, label(optional)] """ center_y, center_x = (rgb.shape[0] - 1) / 2, (rgb.shape[1] - 1) / 2 points = [] for row in range(rgb.shape[0]): for col in range(rgb.shape[1]): R, G, B = 
rgb[row, col] # obtain z value and ignore the un-obtained point(z=0) Z = depth[row, col] if Z == 0: continue # measure world coordinates in Meter(scale=0.001) or miliMeter(scale=1) Z = Z * scale X = (col - center_x) * Z / focal_length_x Y = (row - center_y) * Z / focal_length_y # label the point if input the label table(in kwargs) if label: label_point = offset for i, (mask_name, label_table) in enumerate(list(kwargs.items())): if label_table[row, col] > 0: label_point += i+1 points.append([X, Y, Z, R, G, B, row, col, label_point]) else: points.append([X, Y, Z, R, G, B, row, col]) return np.asarray(points) import math # Checks if a matrix is a valid rotation matrix. def is_rotation_matrix(R: np.array) -> bool: """ Check??? :param R: a matrix of 4x4 :return: a boolean, ??? """ Rt = np.transpose(R) shouldBeIdentity =
np.dot(Rt, R)
numpy.dot
import numpy as np from NeuralNetwork.utils import Activation, cost, regularization, optimizers, Normalizer from NeuralNetwork.utils.defaults import TanH, SoftMax, default_settings import json class NeuralNetwork(optimizers.optimizers): def __init__(self, input_neurons=0, network_shape=None, learning_rate=0.1, beta=0.9, gamma=0.999, epsilon=1e-8, activation=TanH, settings=None, activation_param=None, do_regularization=False, do_normalization=True): super().__init__() if network_shape is None: network_shape = [] self.ActivationList = {} self.RegularizationList = {} self.CostList = {} self.OptimizerList = {} self.NormList = {} self.Activations = [] self.param = activation_param self.make_dict() self.shape = [input_neurons] self.activation = [] self.derivative = [] self.make(network_shape, activation) self.learning_rate = learning_rate self.weights_shape = [] self.weights = [] self.biases = [] self.cost_key = default_settings["cost"] self.cost = None self.d_cost = None self.regularization_key = None self.regularization = None self.lam = None self.do_regularization = do_regularization self.optimizer_key = default_settings["optimizer"] self.optimizer = self.OptimizerList[self.optimizer_key] self.do_norm = do_normalization self.norm_key = default_settings["normalizer"] self.normalizer = self.NormList[self.norm_key] self.beta = beta self.gamma = gamma self.epsilon = epsilon if settings is None: self.settings = default_settings else: self.settings = settings self.set_settings() for i in range(len(self.shape) - 1): self.weights_shape.append([self.shape[i + 1], self.shape[i]]) self.weights.append( np.random.standard_normal(self.weights_shape[i]) / (self.weights_shape[i][1] ** 0.5)) self.biases.append(np.random.standard_normal([self.shape[i + 1], 1])) def copy(self): model = NeuralNetwork() model.shape = [i for i in self.shape] model.weights_shape = [[i for i in j] for j in self.weights_shape] model.learning_rate = self.learning_rate model.Activations = [i for i in 
self.Activations] model.param = self.param model.optimizer_key = self.optimizer_key model.cost_key = self.cost_key model.beta = self.beta model.gamma = self.gamma model.epsilon = self.epsilon model.do_regularization = self.do_regularization model.regularization_key = self.regularization_key model.regularization = self.regularization model.do_norm = self.do_norm model.norm_key = self.norm_key model.weights = [i.copy() for i in self.weights] model.biases = [i.copy() for i in self.biases] model.activation = [model.ActivationList[i].fn for i in model.Activations] model.derivative = [model.ActivationList[i].d for i in model.Activations] model.optimizer = model.OptimizerList[model.optimizer_key] model.cost = model.CostList[model.cost_key].cost model.d_cost = model.CostList[model.cost_key].d_cost model.normalizer = model.NormList[model.norm_key] return model def mutate(self, mutation_rate, mutation_ratio): def internal(x, mr, mro, element): temp1 = np.random.uniform(0, 1) if temp1 < mr: temp2 = np.random.uniform() if temp2 < mro: return np.random.standard_normal()/(self.weights_shape[element][1]**0.5) else: return x+np.random.uniform(-x/100, x/100) else: return x randomize = np.vectorize(internal) for i in range(len(self.weights)): self.weights[i] = randomize(self.weights[i], mutation_rate, mutation_ratio, i) self.biases[i] = randomize(self.biases[i], mutation_rate, mutation_ratio, i) def crossover(self, parent2): child = self.copy() for i in range(len(self.weights)): r = np.random.uniform(0, 1) if r < 0.5: pass else: child.weights[i] = np.copy(parent2.weights[i]) child.biases[i] =
np.copy(parent2.biases[i])
numpy.copy
#!/usr/bin/env python2 """ Subroutine for applying FRI-type compression to the Near-Uniform distribution. """ import numpy import compress_utils import near_uniform import fci_c_utils def cmp_hier_strat(sol_vec, n_sample, p_doub, occ_orb, orb_symm, symm_lookup, hf_num, rngen_ptrs): """Perform FRI-type compression on the Near-Uniform distribution, column-by-column, preserving colummns exactly as determined by number of samples vs. number of nonzero elements. Parameters ---------- sol_vec : (SparseVector object) the current solution vector n_sample : (unsigned int) the desired number of nonzero matrix elements in each column after compression p_doub : (double) the probability of choosing a double excitation vs a single excitation occ_orb : (numpy.ndarray, uint8) The numbers in each row correspond to the indices of occupied orbitals in each determinant, calculated from fci_c_utils.gen_orb_lists orb_symm : (numpy.ndarray, uint8) irreducible representation of each spatial orbital symm_lookup : (numpy.ndarray, uint8) Table of orbitals with each type of symmetry, as generated by fci_utils.gen_byte_table() Returns ------- (numpy.ndarray, uint8) : chosen occupied (0th and 1st columns) and unoccupied (2nd and 3rd columns) orbitals for double excitations (numpy.ndarray, float64) : probability of selecting each chosen double excitation (weight divided by compressed weight) (numpy.ndarray, uint32) : index of the origin determinant of each chosen double excitation in the dets array (numpy.ndarray, uint8) : chosen occupied (0th column) and unoccupied (1st column) orbitals for single excitations (numpy.ndarray, float64) : probability of selecting each chosen single excitation (numpy.ndarray, uint32) : index of the origin determinant of each chosen single excitation in the dets array """ vec_weights = numpy.abs(sol_vec.values) one_norm = vec_weights.sum() kept_sing_orb = numpy.empty([0, 2], dtype=numpy.uint8) kept_doub_orb = numpy.empty([0, 4], dtype=numpy.uint8) kept_sing_idx = 
numpy.empty(0, dtype=numpy.uint32) kept_doub_idx = numpy.empty(0, dtype=numpy.uint32) num_nonz = vec_weights.shape[0] max_idx = 0 target_n_col = (n_sample - num_nonz) * vec_weights[max_idx] / one_norm # target_n_col = n_sample * vec_weights[max_idx] / one_norm all_col_doub, det_idx = fci_c_utils.all_doub_ex(sol_vec.indices[0:1], occ_orb[0:1], orb_symm) all_col_sing = numpy.empty([0, 2], dtype=numpy.uint8) n_col = hf_num while target_n_col + 1 > n_col and one_norm > 1e-10: kept_sing_orb = numpy.append(kept_sing_orb, all_col_sing, axis=0) kept_doub_orb = numpy.append(kept_doub_orb, all_col_doub, axis=0) kept_sing_idx = numpy.append(kept_sing_idx, numpy.full(all_col_sing.shape[0], max_idx, dtype=numpy.uint32)) kept_doub_idx = numpy.append(kept_doub_idx, numpy.full(all_col_doub.shape[0], max_idx, dtype=numpy.uint32)) one_norm -= vec_weights[max_idx] n_sample -= n_col vec_weights[max_idx] = 0 num_nonz -= 1 max_idx = numpy.argmax(vec_weights) target_n_col = (n_sample - num_nonz) * vec_weights[max_idx] / one_norm # target_n_col = n_sample * vec_weights[max_idx] / one_norm curr_det = sol_vec.indices[max_idx:(max_idx + 1)] curr_occ = occ_orb[max_idx:(max_idx + 1)] all_col_doub, det_idx = fci_c_utils.all_doub_ex(curr_det, curr_occ, orb_symm) n_col = det_idx.shape[0] all_col_sing, det_idx = fci_c_utils.all_sing_ex(curr_det, curr_occ, orb_symm) n_col += det_idx.shape[0] print('number preserved exactly', vec_weights.shape[0] - num_nonz) doub_probs = numpy.ones(kept_doub_orb.shape[0]) sing_probs = numpy.ones(kept_sing_orb.shape[0]) one_norm = vec_weights.sum() if one_norm > 1e-10: n_col, = compress_utils.sys_resample(vec_weights / one_norm, n_sample - num_nonz, ret_counts=True) n_col[vec_weights != 0] += 1 # n_col, = compress_utils.sys_resample(vec_weights / one_norm, n_sample, ret_counts=True) else: return kept_doub_orb, doub_probs, kept_doub_idx, kept_sing_orb, sing_probs, kept_sing_idx single_col = n_col == 1 single_counts = numpy.zeros_like(sol_vec.indices, 
dtype=numpy.uint32) single_counts[single_col] = 1 n_col[single_col] = 0 # single_counts = n_col single_doub, single_sing = near_uniform.bin_n_sing_doub(single_counts, p_doub) one_doub_orb, one_doub_prob, one_doub_idx = near_uniform.doub_multin( sol_vec.indices, occ_orb, orb_symm, symm_lookup, single_doub, rngen_ptrs) one_doub_prob *= p_doub # * single_counts[one_doub_idx] one_sing_orb, one_sing_prob, one_sing_idx = near_uniform.sing_multin( sol_vec.indices, occ_orb, orb_symm, symm_lookup, single_sing, rngen_ptrs) one_sing_prob *= (1 - p_doub) # * single_counts[one_sing_idx] doub_orb = numpy.append(kept_doub_orb, one_doub_orb, axis=0) doub_probs = numpy.append(doub_probs, one_doub_prob) doub_idx = numpy.append(kept_doub_idx, one_doub_idx) sing_orb = numpy.append(kept_sing_orb, one_sing_orb, axis=0) sing_probs = numpy.append(sing_probs, one_sing_prob) sing_idx = numpy.append(kept_sing_idx, one_sing_idx) fri_doub_orb, fri_doub_probs, fri_doub_idx, fri_sing_orb, fri_sing_probs, fri_sing_idx = near_uniform.fri_parallel(sol_vec.indices, occ_orb, orb_symm, symm_lookup, n_col, rngen_ptrs, p_doub) doub_orb = numpy.append(doub_orb, fri_doub_orb, axis=0) doub_probs = numpy.append(doub_probs, fri_doub_probs) doub_idx = numpy.append(doub_idx, fri_doub_idx) sing_orb = numpy.append(sing_orb, fri_sing_orb, axis=0) sing_probs = numpy.append(sing_probs, fri_sing_probs) sing_idx = numpy.append(sing_idx, fri_sing_idx) return doub_orb, doub_probs, doub_idx, sing_orb, sing_probs, sing_idx def cmp_hier(sol_vec, n_sample, p_doub, occ_orb, orb_symm, symm_lookup): """Perform FRI-type compression on the Near-Uniform distribution, exploiting its hierarchical structure for efficiency. 
Parameters ---------- sol_vec : (SparseVector object) the current solution vector n_sample : (unsigned int) the desired number of nonzero matrix elements after the compression p_doub : (double) the probability of choosing a double excitation vs a single excitation occ_orb : (numpy.ndarray, uint8) The numbers in each row correspond to the indices of occupied orbitals in each determinant, calculated from fci_c_utils.gen_orb_lists orb_symm : (numpy.ndarray, uint8) irreducible representation of each spatial orbital symm_lookup : (numpy.ndarray, uint8) Table of orbitals with each type of symmetry, as generated by fci_utils.gen_byte_table() Returns ------- (numpy.ndarray, uint8) : chosen occupied (0th and 1st columns) and unoccupied (2nd and 3rd columns) orbitals for double excitations (numpy.ndarray, float64) : probability of selecting each chosen double excitation (numpy.ndarray, uint32) : index of the origin determinant of each chosen double excitation in the dets array (numpy.ndarray, uint8) : chosen occupied (0th column) and unoccupied (1st column) orbitals for single excitations (numpy.ndarray, float64) : probability of selecting each chosen single excitation (numpy.ndarray, uint32) : index of the origin determinant of each chosen single excitation in the dets array """ seq_idx = numpy.arange(n_sample, dtype=numpy.int32) symm_virt = near_uniform.virt_symm(occ_orb, orb_symm, symm_lookup) occ_allow, virt_allow = near_uniform.sing_allow(symm_virt, occ_orb, orb_symm) # First layer of compression: singles vs. 
doubles sing_doub = numpy.array([[1 - p_doub], [p_doub]]) num_dets = sol_vec.values.shape[0] vec_reweights = numpy.abs(sol_vec.values) new_weights = sing_doub * vec_reweights new_weights.shape = -1 # singles first, then doubles fri_idx, fri_vals = compress_utils.fri_1D(new_weights, n_sample) n_fried = fri_idx.shape[0] det_idx = fri_idx % num_dets # index of determinant det_idx = det_idx.astype(numpy.uint32) n_sing = numpy.searchsorted(fri_idx, num_dets) n_doub = n_fried - n_sing # Second layer of compression: occupied orbitals, or occupied pairs, for each choice counts = occ_allow[det_idx[:n_sing], 0].astype(numpy.uint32) disallowed_ex = counts == 0 fri_vals[:n_sing][disallowed_ex] = 0 counts[disallowed_ex] = 1 # to avoid 0/0 errors n_elec = occ_orb.shape[1] n_occ_pair = n_elec * (n_elec - 1) / 2 counts = numpy.append(counts, n_occ_pair * numpy.ones(n_doub, numpy.uint32)) fri_idx, fri_vals = compress_utils.fri_subd(fri_vals, counts, numpy.empty([0, 0]), n_sample) sampl_idx = fri_idx[:, 0] # Group nonzero elements in FRI vector by single/double excitations sing_idx = sampl_idx < n_sing doub_idx = numpy.logical_not(sing_idx) new_det_idx = det_idx[sampl_idx[sing_idx]] n_sing = new_det_idx.shape[0] occ_idx = occ_allow[new_det_idx, fri_idx[sing_idx, 1] + 1] new_det_idx = numpy.append(new_det_idx, det_idx[sampl_idx[doub_idx]]) det_idx = new_det_idx occ_idx = numpy.append(occ_idx, fri_idx[doub_idx, 1]) # index of occupied orbital or occ pair fri_vals = numpy.append(fri_vals[sing_idx], fri_vals[doub_idx]) # Third layer of compression: allowed virtual orbitals for singles, symmetry pairs for doubles doub_wts, doub_nvirt, doub_occ = near_uniform.symm_pair_wt(symm_virt, occ_orb, orb_symm, det_idx[n_sing:], occ_idx[n_sing:]) null_doub = numpy.logical_and(doub_occ[:, 0] == 0, doub_occ[:, 1] == 0) null_idx = numpy.nonzero(null_doub)[0] fri_vals[null_idx + n_sing] = 0 fri_idx, fri_vals = compress_utils.fri_subd(fri_vals, virt_allow[det_idx[:n_sing], occ_idx[:n_sing]], doub_wts, 
n_sample) all_arrs, sing_arrs, doub_arrs, n_sing = compress_utils.proc_fri_sd_choices( fri_idx[:, 0], n_sing, [det_idx, occ_idx], [], [doub_occ, doub_wts, doub_nvirt]) det_idx, occ_idx = all_arrs doub_occ, doub_wts, doub_nvirt = doub_arrs n_doub = doub_occ.shape[0] virt_idx = fri_idx[:, 1] doub_wts = doub_wts[seq_idx[:n_doub], virt_idx[n_sing:]] doub_nvirt = doub_nvirt[seq_idx[:n_doub], virt_idx[n_sing:]] orb_counts = numpy.append(numpy.ones(n_sing), doub_nvirt) weights = numpy.empty((0, 2)) # Fourth layer of compression: virtual orbital pair for doubles fri_idx, fri_vals = compress_utils.fri_subd(fri_vals, orb_counts, weights, n_sample) sampl_idx = fri_idx[:, 0] # Group nonzero elements in FRI vector by single/double excitations sing_idx = sampl_idx < n_sing doub_idx =
numpy.logical_not(sing_idx)
numpy.logical_not
__author__ = 'lynevdv' from lenstronomy.LensModel.Profiles.multipole import Multipole import numpy as np import pytest import numpy.testing as npt class TestMultipole(object): """ tests the Gaussian methods """ def setup(self): self.Multipole = Multipole() def test_function(self): x = 1 y = 2 m = 4 a_m = 0.05 phi_m = 25*np.pi/180. values = self.Multipole.function(x, y, m, a_m, phi_m) npt.assert_almost_equal(values, 0.006684307, decimal=6) x = np.array([0]) y = np.array([0]) values = self.Multipole.function(x, y, m, a_m, phi_m) assert values[0] == 0 x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) values = self.Multipole.function(x, y, m, a_m, phi_m) npt.assert_almost_equal(values[0], -0.007409114, decimal=6) npt.assert_almost_equal(values[1], -0.009453038, decimal=6) npt.assert_almost_equal(values[2], -0.009910505, decimal=6) def test_derivatives(self): x = 1 y = 2 m = 4 a_m = 0.05 phi_m = 25 * np.pi / 180. f_x, f_y = self.Multipole.derivatives(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_x, -0.003939644, decimal=6) npt.assert_almost_equal(f_y, 0.005311976, decimal=6) x = np.array([1]) y = np.array([2]) f_x, f_y = self.Multipole.derivatives(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_x[0], -0.003939644, decimal=6) npt.assert_almost_equal(f_y[0], 0.005311976, decimal=6) x = np.array([2, 3, 1]) y = np.array([1, 1, 4]) f_x, f_y = self.Multipole.derivatives(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_x[0], -0.003613858, decimal=6) npt.assert_almost_equal(f_x[1], -0.000970385, decimal=6) npt.assert_almost_equal(f_x[2], 0.005970704, decimal=6) npt.assert_almost_equal(f_y[0], -0.000181398, decimal=6) npt.assert_almost_equal(f_y[1], -0.006541883, decimal=6) npt.assert_almost_equal(f_y[2], 0.001649720, decimal=6) def test_hessian(self): x = 1 y = 2 m = 4 a_m = 0.05 phi_m = 25 * np.pi / 180. 
f_xx, f_xy, f_yx, f_yy = self.Multipole.hessian(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_xx, -0.016042338, decimal=6) npt.assert_almost_equal(f_yy, -0.004010584, decimal=6) npt.assert_almost_equal(f_xy, 0.008021169, decimal=6) npt.assert_almost_equal(f_xy, f_yx, decimal=8) x = np.array([1]) y = np.array([2]) f_xx, f_xy, f_yx, f_yy = self.Multipole.hessian(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_xx[0], -0.016042338, decimal=6) npt.assert_almost_equal(f_yy[0], -0.004010584, decimal=6) npt.assert_almost_equal(f_xy[0], 0.008021169, decimal=6) x = np.array([1,3,4]) y = np.array([2,1,1]) values = self.Multipole.hessian(x, y, m, a_m, phi_m) npt.assert_almost_equal(values[0][0], -0.016042338, decimal=6) npt.assert_almost_equal(values[3][0], -0.004010584, decimal=6)
npt.assert_almost_equal(values[1][0], 0.008021169, decimal=6)
numpy.testing.assert_almost_equal
import argparse import os import scipy.io as sio import numpy as np import nibabel as nib import time import tensorflow as tf from BrainStructureAwareNetwork_arch import BrainStructureAwareNetwork from SpatialConnectionAwareNetwork_arch import SpatialConnectionAwareNetwork from getimgtest import getimgtest import math batch_size = 1 # to recover the origin distribution Intensity_max = 255 Intensity_mean = 0.1616 Intensity_std = 0.2197 parser = argparse.ArgumentParser(description='Tensorflow DeepVolume Test') parser.add_argument('--gpu', type=int, default=0, help='gpu device id') parser.add_argument('--datafile', default='../datafile/', type=str, help='path to datafile folder') parser.add_argument('--savepath', default='../output/', type=str, help='path to output folder') parser.add_argument('--modelpath', default='../models/', type=str, help='path to model save folder') parser.add_argument('-s', '--stage', type=int, default=1, help='load the network one by one...') args = parser.parse_args() def test_BrainStructureNetwork(): filespath = args.datafile axialThickpath = filespath + 'axialThick-test.txt' sagittalThickpath = filespath + 'sagittalThick-test.txt' savepath = args.savepath + 'test' modelpath = args.modelpath + 'BrainStructureAwareNetwork/Model100.ckpt' val_axialfile = open(axialThickpath) val_axialread = val_axialfile.read().splitlines() ntest = len(val_axialread) testsavepath = savepath + str(1) if (os.path.isdir(testsavepath) == False) : for k in range(0, ntest): os.mkdir(savepath + str(k + 1)) print("Folder created") with tf.name_scope('input'): LR = tf.placeholder(tf.float32, shape=[batch_size, 200, 200, 200, 2]) keep_prob = tf.placeholder(tf.float32, name='dropout_ratio') probs, logits = BrainStructureAwareNetwork(LR, keep_prob) config = tf.ConfigProto() config.gpu_options.visible_device_list = str(args.gpu) sess = tf.Session(config=config) saver = tf.train.Saver() saver.restore(sess, modelpath) print("Model loaded") def feed_dict(xstart, ystart, 
zstart, LRimg): xs = np.zeros((1, 200, 200, 200, 2)) xs[:, :, :, :, :] = LRimg[:,xstart:xstart + 200, ystart:ystart + 200, zstart:zstart + 200, :] return {LR: xs, keep_prob: 1} # sample patches with tumor for kv in range(0, ntest): time_start = time.time() print('Loading from test case ' + str(kv + 1) + ' for test for stage 1') LRimg = getimgtest(axialThickpath, sagittalThickpath, kv) x_range = LRimg.shape[1] y_range = LRimg.shape[2] z_range = LRimg.shape[3] if z_range < 200: LRimgpad = np.zeros((1, x_range, y_range, 200, 2)) LRimgpad[:,:,:,0:z_range,:] = LRimg LRimg = LRimgpad # The receptive field of 3D U-net is 68 # We should retrive the center 40^3 pixel of 200^3 to reconstruct if z_range < 200: hp = np.zeros((x_range, y_range, 200, 1)) else: hp = np.zeros((x_range, y_range, z_range, 1)) x_sample = np.floor((x_range -160) / 40) + 1 x_sample = x_sample.astype(np.int16) y_sample = np.floor((y_range -160) / 40) + 1 y_sample = y_sample.astype(np.int16) z_sample = np.maximum(np.floor((z_range -160) / 40) + 1, 1) z_sample = z_sample.astype(np.int16) for jx in range(0, x_sample): for jy in range(0, y_sample): for jz in range(0, z_sample): # deal with the boundaries if jx < x_sample - 1: # not the last xstart = jx * 40 else: xstart = x_range - 200 if jy < y_sample - 1: # not the last ystart = jy * 40 else: ystart = y_range - 200 if jz < z_sample - 1: # not the last zstart = jz * 40 else: zstart = LRimg.shape[3] - 200 ht = sess.run(probs, feed_dict=feed_dict(xstart, ystart, zstart, LRimg)) # setting the middle content hp[xstart + 80:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:120, 80:120, 80:120, :] # care about the boundies! 
the patch near the boundies should have half-full padding if jx == 0: hp[xstart:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 0:120, 80:120, 80:120, :] if jx == x_sample - 1: hp[xstart + 80:xstart + 200, ystart + 80:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:200, 80:120, 80:120, :] if jy == 0: hp[xstart + 80:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:120, 0:120, 80:120, :] if jy == y_sample - 1: hp[xstart + 80:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 120, :] = ht[0, 80:120, 80:200, 80:120, :] if jz == 0: hp[xstart + 80:xstart + 120, ystart + 80:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:120, 80:120, 0:120, :] if jz == z_sample - 1: hp[xstart + 80:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:120, 80:120, 80:200, :] # then the 4 corner...xy if jx == 0 and jy == 0: hp[xstart:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 0:120, 0:120, 80:120, :] if jx == 0 and jy == y_sample - 1: hp[xstart:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 120, :] = ht[0, 0:120, 80:200, 80:120, :] if jx == x_sample - 1 and jy == 0: hp[xstart + 80:xstart + 200, ystart:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:200, 0:120, 80:120, :] if jx == x_sample - 1 and jy == y_sample - 1: hp[xstart + 80:xstart + 200, ystart + 80:ystart + 200, zstart + 80:zstart + 120, :] = ht[0, 80:200, 80:200, 80:120, :] # then the 4 corner...xz if jx == 0 and jz == 0: hp[xstart:xstart + 120, ystart+80:ystart + 120, zstart:zstart + 120, :] = ht[0, 0:120, 80:120, 0:120, :] if jx == 0 and jz == z_sample - 1: hp[xstart:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 0:120, 80:120, 80:200, :] if jx == x_sample - 1 and jz == 0: hp[xstart + 80:xstart + 200, ystart+80:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:200, 80:120, 0:120, :] if jx == x_sample - 1 and jz == z_sample - 1: hp[xstart + 80:xstart + 200, 
ystart + 80:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:200, 80:120, 80:200, :] # then the 4 corner...yz if jy == 0 and jz == 0: hp[xstart+80:xstart + 120, ystart:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:120, 0:120, 0:120, :] if jy == 0 and jz == z_sample - 1: hp[xstart+80:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:120, 0:120, 80:200, :] if jy == y_sample - 1 and jz == 0: hp[xstart + 80:xstart + 120, ystart+80:ystart + 200, zstart:zstart + 120, :] = ht[0, 80:120, 80:200, 0:120, :] if jy == y_sample - 1 and jz == z_sample - 1: hp[xstart + 80:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 200, :] = ht[0, 80:120, 80:200, 80:200, :] # the last 8 small corners.. if jx == 0 and jy == 0 and jz == 0: hp[xstart:xstart + 120, ystart:ystart + 120, zstart:zstart + 120, :] = ht[0, 0:120, 0:120, 0:120, :] if jx == 0 and jy == 0 and jz == z_sample - 1: hp[xstart:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 0:120, 0:120, 80:200, :] if jx == 0 and jy == y_sample - 1 and jz == 0: hp[xstart:xstart + 120, ystart + 80:ystart + 200, zstart:zstart + 120, :] = ht[0, 0:120, 80:200, 0:120, :] if jx == 0 and jy == y_sample - 1 and jz == z_sample - 1: hp[xstart:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 200, :] = ht[0, 0:120, 80:200, 80:200, :] if jx == x_sample - 1 and jy == 0 and jz == 0: hp[xstart + 80:xstart + 200, ystart:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:200, 0:120, 0:120, :] if jx == x_sample - 1 and jy == 0 and jz == z_sample - 1: hp[xstart + 80:xstart + 200, ystart:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:200, 0:120, 80:200, :] if jx == x_sample - 1 and jy == y_sample - 1 and jz == 0: hp[xstart + 80:xstart + 200, ystart + 80:ystart + 200, zstart:zstart + 120, :] = ht[0, 80:200, 80:200, 0:120, :] if jx == x_sample - 1 and jy == y_sample - 1 and jz == z_sample - 1: hp[xstart + 80:xstart + 200, ystart + 80:ystart + 200, zstart + 80:zstart + 200, :] = 
ht[0, 80:200, 80:200, 80:200, :] print('processing Brain Structure Aware Model.. ' + str(jx/x_sample*100) + '%') if z_range < 200: hp = hp[:,:,0:z_range] print('processing Brain Structure Aware Model.. ' + '100%') time_end = time.time() print('Time cost of test at case ' + str(kv + 1) + ' for stage 1 has been ' + str(time_end - time_start) + ' s') savename = '%s%s%s' % (savepath, str(kv + 1), '/Reconstruction_BrainStructureAwareModel.mat') sio.savemat(savename, {'Reconstruction': hp}) print('################ case ' + str(kv + 1) + ' has been done for Brain Structure Aware Model ################') sess.close() def test_SpatialConnectionAwareNetwork(): seq_length = 2 network_template = tf.make_template('network', SpatialConnectionAwareNetwork) filespath = args.datafile axialThickpath = filespath + 'axialThick-test.txt' sagittalThickspath = filespath + 'sagittalThicks-test.txt' savepath = args.savepath + 'test' modelpath = args.modelpath + 'SpatialConnectionAwareNetwork/Model40.ckpt' val_axialfile = open(axialThickpath) val_axialread = val_axialfile.read().splitlines() val_sagittalfile = open(sagittalThickspath) val_sagittalread = val_sagittalfile.read().splitlines() ntest = len(val_axialread) axialThinpath = filespath + 'axialThin-test.txt' val_GTfile = open(axialThinpath) val_GTread = val_GTfile.read().splitlines() with tf.name_scope('input'): LR = tf.placeholder(tf.float32, shape=[batch_size, seq_length, 360, 432, 3], name='Lowresolute_image') # conv network hidden = None x_1, hidden = network_template(LR[:, 0, :, :, :], hidden) x_2, hidden = network_template(LR[:, 1, :, :, :], hidden) config = tf.ConfigProto() config.gpu_options.visible_device_list = str(args.gpu) sess = tf.Session(config=config) saver = tf.train.Saver() saver.restore(sess, modelpath) print("Model loaded") for kv in range(0, ntest): time_start = time.time() print('Loading from test case ' + str(kv + 1) + ' for stage 2 for test') matinput0 = val_axialread[kv] load_data_input0 = 
sio.loadmat(matinput0) datainput0 = load_data_input0['T1_image'] Input_full0cut = datainput0[int(datainput0.shape[0] / 2 - 180):int(datainput0.shape[0] / 2 + 180), int(datainput0.shape[1] / 2 - 216):int(datainput0.shape[1] / 2 + 216), :] datainput0 = np.transpose(Input_full0cut, [2, 0, 1]) matinput1 = savepath + str(kv + 1) + '/Reconstruction_BrainStructureAwareModel.mat' load_data_input1 = sio.loadmat(matinput1) datainput1 = load_data_input1['Reconstruction'] datainput1m = datainput1[:, :, :, 0] Input_fullrcut = datainput1m[int(datainput1m.shape[0] / 2 - 180):int(datainput1m.shape[0] / 2 + 180), int(datainput1m.shape[1] / 2 - 216):int(datainput1m.shape[1] / 2 + 216), :] datainput1 = np.transpose(Input_fullrcut, [2, 0, 1]) test_sagittalname = val_sagittalread[kv] load_data_input2 = sio.loadmat(test_sagittalname) datainput2m = load_data_input2['T2s2_image'] Input_full2cut = datainput2m[int(datainput1m.shape[0] / 2 - 180):int(datainput1m.shape[0] / 2 + 180), int(datainput1m.shape[1] / 2 - 216):int(datainput1m.shape[1] / 2 + 216), :] if Input_full2cut.shape[2] < 2 * datainput1m.shape[2]: Input_full2cut = np.dstack((Input_full2cut, Input_full2cut[:, :, Input_full2cut.shape[2] - 1])) datainputsag = np.transpose(Input_full2cut, [2, 0, 1]) totalnum = datainput1.shape[0] def feed_dict(j): pointer = j xs = np.zeros((batch_size, seq_length, 360, 432, 3)) xs[:, 0, :, :, 0] = datainput1[pointer, 0:360, 0:432] xs[:, 0, :, :, 1] = datainput0[pointer, 0:360, 0:432] xs[:, 0, :, :, 2] = datainputsag[2 * pointer, 0:360, 0:432] xs[:, 1, :, :, 0] = datainput1[pointer, 0:360, 0:432] xs[:, 1, :, :, 1] = datainput0[pointer, 0:360, 0:432] xs[:, 1, :, :, 2] = datainputsag[2 * pointer + 1, 0:360, 0:432] return {LR: xs} hp = datainput1m for j in range(0, np.int16(totalnum)): ht = sess.run(x_2, feed_dict=feed_dict(j)) hp[int(datainput1m.shape[0] / 2 - 180):int(datainput1m.shape[0] / 2 + 180), int(datainput1m.shape[1] / 2 - 216):int(datainput1m.shape[1] / 2 + 216), j] = ht[0, :, :, 0] time_end 
= time.time() print('Time cost of test at case ' + str(kv + 1) + ' for stage 2 has been ' + str(time_end - time_start) + ' s') savename = '%s%s%s' % (savepath, str(kv + 1), '//Reconstruction_DeepVolume.mat') sio.savemat(savename, {'Reconstruction': hp}) # load the brain mask, which was generated based on the axial thin MRI c1map = val_GTread[kv][0:-4] + 'c1.nii' c1load = nib.load(c1map) c1im = c1load.get_fdata() c2map = val_GTread[kv][0:-4] + 'c2.nii' c2load = nib.load(c2map) c2im = c2load.get_fdata() c3map = val_GTread[kv][0:-4] + 'c3.nii' c3load = nib.load(c3map) c3im = c3load.get_fdata() cim = c1im + c2im + c3im RecIntensity = np.abs((hp * Intensity_std + Intensity_mean) * Intensity_max) imgToSave = np.int16(RecIntensity * cim) npDtype = np.dtype(np.int16) proxy_origin = nib.load(c1map) affine_origin = proxy_origin.affine proxy_origin.uncache() newImg = nib.Nifti1Image(imgToSave, affine_origin) newImg.set_data_dtype(npDtype) nib.save(newImg, savepath + str(kv + 1) + '//pred.nii.gz') print('################ case ' + str(kv + 1) + ' has been done for Spatial Connection Aware Model ################') sess.close() def evaluation(): filespath = args.datafile savepath = args.savepath + 'test' axialThinpath = filespath + 'axialThin-test.txt' val_GTfile = open(axialThinpath) val_GTread = val_GTfile.read().splitlines() ntest = len(val_GTread) PSNRall = [] print('################################ Doing evaluation ################################') for kv in range(0, ntest): predmap = savepath + str(kv + 1) + '//pred.nii.gz' predload = nib.load(predmap) predim = np.uint8(predload.get_fdata()) matGT = val_GTread[kv] load_data_input0 = sio.loadmat(matGT) dataGT = load_data_input0['T3_image'] GTIntensity = (dataGT * Intensity_std + Intensity_mean) * Intensity_max c1map = val_GTread[kv][0:-4] + 'c1.nii' c1load = nib.load(c1map) c1im = c1load.get_fdata() c2map = val_GTread[kv][0:-4] + 'c2.nii' c2load = nib.load(c2map) c2im = c2load.get_fdata() c3map = val_GTread[kv][0:-4] + 
'c3.nii' c3load = nib.load(c3map) c3im = c3load.get_fdata() cim = c1im + c2im + c3im GTim = np.uint8(GTIntensity * cim) Resultpsnr = psnr(predim, GTim) PSNRall.append(Resultpsnr) print('PSNR of case ' + str(kv+1) + ' is ' + str(Resultpsnr)) print('average PSNR is ' + str(np.mean(PSNRall))) def psnr(img1, img2): mse = np.mean((np.double(img1) -
np.double(img2)
numpy.double
import os import sys import numpy as np import torch class DETECTION_UTILS: def __init__(self) -> None: pass @staticmethod def calc_brick_volume(bricks): ''' bricks: [N, x0, y0, z0, x1, y1, z1] ''' delta_bricks = bricks[:, 3:] - bricks[:,:3] # assert np.all(delta_bricks > 0) delta_bricks.clip(min = 0) volumes = delta_bricks[:,0] * delta_bricks[:,1] * delta_bricks[:,2] return volumes @staticmethod def calc_brick_iou(bricks1, bricks2): ''' boxes: [N, x0,y0,z0,x1,y1,z1] ''' v1 = DETECTION_UTILS.calc_brick_volume(bricks1) v2 = DETECTION_UTILS.calc_brick_volume(bricks2) pt_min = np.maximum(bricks1[:, :3], bricks2[:, :3]) pt_max =
np.minimum(bricks1[:, 3:], bricks2[:, 3:])
numpy.minimum
''' ## Data Loader ## # Creates a data generator which loads and preprocesses the demonstrations into pairs of ranked trajectories @author: <NAME> (<EMAIL>) ''' import tensorflow as tf import numpy as np import os class DataGenerator: def __init__(self, data_dir, batch_size, traj_len, n_workers, preprocessing_offline): self.traj_len = traj_len if preprocessing_offline: data_dir += '_preprocessed' self.np_list = self.list_np_files(data_dir) data = tf.data.Dataset.from_tensor_slices(self.np_list) data = data.shuffle(len(self)).repeat() if preprocessing_offline: # The demonstrations data has already been preprocessed into trajectory pairs # Run preprocess function on trajectory pairs (filtering out invalid examples where reward values are equal), and prefetch parsed samples into buffer (must use tf.py_func as we need a python function to load the np files) data = data.map(lambda np_sample: tf.py_func(self._process_trajectory_pairs, [np_sample], [tf.float32, tf.float32, tf.double, tf.double]), num_parallel_calls=n_workers).prefetch(buffer_size=5*batch_size) # Batch samples together # data = data.batch(batch_size, drop_remainder=True) # if tf version >= 1.10 data = data.apply(tf.contrib.data.batch_and_drop_remainder(batch_size)) # if tf version < 1.10 else: # The data is still in the form of demonstrations, we need to preprocess and generate the trajectories live # Batch into pairs of samples # data = data.batch(2, drop_remainder=True) # if tf version >= 1.10 data = data.apply(tf.contrib.data.batch_and_drop_remainder(2)) # if tf version < 1.10 # Run preprocess function on demonstrations (filtering out invalid examples where reward values are equal) into trajectory samples, and prefetch parsed samples into buffer (must use tf.py_func as we need a python function to load the np files) data = data.map(lambda np_sample: tf.py_func(self._process_demonstrations, [np_sample], [tf.float32, tf.float32, tf.double, tf.double]), num_parallel_calls=n_workers).filter(lambda low_traj, 
high_traj, low_reward, high_reward: tf.not_equal(low_reward, high_reward)).prefetch(buffer_size=5*batch_size) # Batch samples together # data = data.batch(batch_size, drop_remainder=True) # if tf version >= 1.10 data = data.apply(tf.contrib.data.batch_and_drop_remainder(batch_size)) # if tf version < 1.10 self.data = data def __len__(self): return len(self.np_list) def _process_trajectory_pairs(self, traj_pair_file): """Preprocessing function for already-generated trajectory pairs""" traj_pair = np.load(traj_pair_file) low_reward_snippet = traj_pair['low_reward_traj'].copy() high_reward_snippet = traj_pair['high_reward_traj'].copy() low_reward = traj_pair['low_reward_value'].copy() high_reward = traj_pair['high_reward_value'].copy() # Convert int to float and normalise to [0.0, 1.0] range low_reward_snippet = low_reward_snippet.astype(np.float32, copy=False) low_reward_snippet /= 255.0 high_reward_snippet = high_reward_snippet.astype(np.float32, copy=False) high_reward_snippet /= 255.0 return low_reward_snippet, high_reward_snippet, low_reward, high_reward def _process_demonstrations(self, demonstrations_files): """Preprocessing function for demonstrations - Generates preprocessed trajectory pairs from demonstrations""" filenames = [demonstrations_files[0].decode('UTF-8'), demonstrations_files[1].decode('UTF-8')] # Sort filenames into [lower_reward, higher_reward] order filenames = sorted(filenames, key=lambda x: self.extract_reward(x)) low_reward = self.extract_reward(filenames[0]) high_reward = self.extract_reward(filenames[1]) # Only process samples further if they do not have equal rewards (as samples with equal rewards will be discarded later) if low_reward != high_reward: low_reward_traj =
np.load(filenames[0])
numpy.load
# -*- coding: utf-8 -*- """ Created on Wed Dec 5 14:40:15 2018 This is the module to extract the road users coexisting with a given ego user @author: cheng """ import numpy as np from sklearn.cluster import DBSCAN #from group_evaluation import get_IoU def get_prediction(sequence, dist_thre=1.5, ratio=0.90, max_friends=100): ''' Extract ego user's using group_detection ''' Detector = Group_Detection(data=sequence, dist_thre=dist_thre, ratio_thre=ratio) # Define the largest number of friends an ego user can have (here, include the ego user self) # This number must be large enough to harvest all possibilities t_friends = np.zeros([Detector.userList.shape[-1], max_friends]) for count, egoUserId in enumerate(Detector.userList): userData = Detector.data[Detector.data[:, 1]==egoUserId, :] if egoUserId != 0: egoUserFl = np.unique(userData[:, 0]) frameData = Detector.get_frame_data(egoUserFl) friends = Detector.frame_DBscan(frameData, egoUserId, egoUserFl) store_fl = np.append([egoUserId], friends) t_friends[count, 0:store_fl.shape[-1]] = store_fl return t_friends class Group_Detection(): ''' This is the class for group detection, which is a time sequence DBSCAN: DBSCAN_friend: Using DBSCAN to cluster friends into group based on Euclidean distance ''' def __init__(self, data, dist_thre=3, ratio_thre=0.9): ''' params: data_dir: it is the place where trajectory data resident dist_thre: Euclidean distance threshold for defining a friend ratio_thre: overlap threshold for defining a friend ''' # Store paramters self.data = data self.dist_thre = dist_thre self.ratio_thre = ratio_thre # Get the list for all the unique frames self.frameList = np.unique(self.data[:, 0]) # print('Frame list: ', self.frameList) # Get the list for all unique users self.userList = np.unique(self.data[:, 1]) # print('\nuser list: ', self.userList) def get_frame_data(self, frameList): ''' This is the function to get the data within the list of frames params: frameList: the list of the frames to be 
considered ''' frameData = np.empty(shape=[0, 4]) for frame in frameList: fData = self.data[self.data[:, 0]==frame, :] frameData =
np.vstack((frameData, fData))
numpy.vstack
import numpy as np def compute_iou(box, boxes, area, areas): """ compute ious of box over boxes :param box: :param boxes: :param area: area of box :param areas: areas of target boxes :return: ious """ # max of top left and min of bottom down x1 = np.maximum(box[0], boxes[:, 0]) y1 = np.maximum(box[1], boxes[:, 1]) x2 =
np.minimum(box[2], boxes[:, 2])
numpy.minimum
#!/usr/bin/python3 # -*- coding=utf-8 -*- """Data process utility functions.""" import numpy as np import cv2 from PIL import Image, ImageEnhance, ImageFilter def rand(a=0, b=1): return np.random.rand()*(b-a) + a def random_grayscale(image, prob=.2): """ Random convert image to grayscale # Arguments image: origin image for grayscale convert numpy image array prob: probability for grayscale convert, scalar to control the convert probability. # Returns image: adjusted numpy image array. """ convert = rand() < prob if convert: #convert to grayscale first, and then #back to 3 channels fake RGB image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) return image def random_chroma(image, jitter=.5): """ Random adjust chroma (color level) for image # Arguments image: origin image for grayscale convert numpy image array jitter: jitter range for random chroma, scalar to control the random color level. # Returns new_image: adjusted numpy image array. """ enh_col = ImageEnhance.Color(Image.fromarray(image.astype(np.uint8))) color = rand(jitter, 1/jitter) new_image = enh_col.enhance(color) return
np.array(new_image)
numpy.array
import numpy as np import configparser from typing import Callable from math import sqrt, pi, gamma import os import numbers from scipy.spatial.transform.rotation import Rotation import plotly.graph_objs as go def create_dir(dirname: str): """Creates a dictionary if it does not already exist.""" if not os.path.exists(dirname): os.makedirs(dirname) def read_cfg(cfg_name: str) -> dict: """Reads a config from text format into a dictionary.""" cfg = configparser.ConfigParser() if cfg_name[-3:] == 'cfg': cfg.read(cfg_name) else: cfg.read_string(cfg_name) c = dict() c['SEED'] = cfg.getint('general', 'SEED') c['CONV_TOL'] = cfg.getfloat('general', 'CONV_TOL') c['N'] = cfg.getint('general', 'N') c['ALPHA'] = cfg.getfloat('general', 'ALPHA', fallback=1.0) c['BETA'] = cfg.getfloat('general', 'BETA', fallback=0.0) c['COLLISION_RES'] = cfg.getfloat('general', 'COLLISION_RES', fallback=1.0) c['EPS'] = cfg.getfloat('general', 'EPS', fallback=1e-2) c['RHO'] = cfg.getfloat('general', 'RHO', fallback=5e-1) c['R_MAX'] = cfg.getfloat('general', 'R_MAX', fallback=1e-1) c['GREEDY'] = cfg.getboolean('general', 'GREEDY', fallback=False) c['PROJ_STEP_SIZE'] = cfg.getfloat('general', 'PROJ_STEP_SIZE', fallback=1.0) if not os.path.exists('plots'): os.makedirs('plots') print(c) return c def check_limits(value, min_value, max_value) -> bool: """Check if all values are between [min_value, max_value]. 
Three input types are possible: - value, min_value, max_value are scalars - value is an array and min_value, max_value are scalars - value, min_value, max_value are arrays and an elementwise check is performed """ # value, max and min are arrays if (type(value) is np.ndarray) and type(max_value) is np.ndarray and type(min_value) is np.ndarray: for i in range(value.size): if value.item(i) > max_value.item(i) or value.item(i) < min_value.item(i): return False return True # value is array, max and min are floats if (type(value) is np.ndarray) and type(max_value) is float and type(min_value) is float: for i in range(value.size): if value.item(i) > max_value or value.item(i) < min_value: return False return True # value, max, min are floats if isinstance(value, numbers.Number): return value <= max_value and value >= min_value raise TypeError("inputs to limit_value have to be arrays or floats, but is", type(value)) def unit_ball_measure(n: int) -> float: """Computes volume of unit ball of dimension n.""" return (sqrt(pi) ** n) / gamma(float(n) / 2.0 + 1.0) def path_cost(path: list) -> float: """Computes length of a path.""" cost = 0.0 if type(path[0]) is list: for path_i in path: for i in range(1, len(path_i)): cost += np.linalg.norm(path_i[i-1] - path_i[i]) else: for i in range(1, len(path)): cost += np.linalg.norm(path[i-1] - path[i]) return cost def is_on_manifold(m: Callable[[np.ndarray], np.ndarray], q: np.ndarray, eps=1e-4) -> bool: """Checks if a configuration q is on a manifold defined by a level set m(q).""" return np.linalg.norm(m.y(q)) < eps def get_volume(low: list, up: list) -> float: """Returns the volume defined of a box defined by lower and upper limits.""" vol = 1.0 for i in range(len(low)): vol = vol * (up[i] - low[i]) return vol def plot_box(pd: list, pos: np.ndarray, quat: np.ndarray, size: np.ndarray): """Plots a box in plotly at location pos with rotation quat and dimensions size.""" d = -size p = size X = np.array([[d[0], d[0], p[0], p[0], d[0], 
d[0], p[0], p[0]], [d[1], p[1], p[1], d[1], d[1], p[1], p[1], d[1]], [d[2], d[2], d[2], d[2], p[2], p[2], p[2], p[2]]]) R = Rotation.from_quat(quat) X = R.apply(X.T) + pos pd.append(go.Mesh3d( x=X[:, 0], y=X[:, 1], z=X[:, 2], flatshading=True, lighting=dict(facenormalsepsilon=0), lightposition=dict(x=2000, y=1000), color='black', i=[7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], j=[3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3], k=[0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6], name='y', showscale=False ) ) class Projection: """This class implements an iterative optimization routine that projects configurations onto a constraint.""" def __init__(self, f: Callable[[np.ndarray], np.ndarray], J: Callable[[np.ndarray], np.ndarray], tol: float = 1e-5, max_iter: int = 200, step_size: float = 1.0): self.f = f self.J = J self.tol = tol self.max_iter = max_iter self.step_size = step_size def project(self, q: np.ndarray) -> (bool, np.ndarray): """Projects a point onto the constraint and return a boolean indicating success and the projected point.""" y = self.f(q) y0 = 2.0 * np.linalg.norm(y) iter = 0 while np.linalg.norm(y) > self.tol and iter < self.max_iter and np.linalg.norm(y) < y0: J = self.J(q) q = q - self.step_size * np.linalg.lstsq(J, y, rcond=-1)[0] y = self.f(q) iter += 1 result = np.linalg.norm(y) <= self.tol return result,
np.array(q)
numpy.array
''' numpy最终要的特点就是N维数组对象(ndarray),他是一个快速而领会的大数据集容器 可以对数据执行一些数学运算,其语法和标量元素相同. ''' from numpy.random import randn import numpy as np data = randn(2, 3) # 创建随机数2*3维 print(data) print(data*10) print(data + data) ''' output: data [[ 0.26474196 0.39938213 0.00644464] [-0.89070803 1.20568504 0.77317617]] data * 10 [[ 2.64741962 3.99382133 0.06444643] [ -8.90708032 12.05685039 7.73176167]] data + data [[ 0.52948392 0.79876427 0.01288929] [-1.78141606 2.41137008 1.54635233]] ''' ''' 需要注意的是ndarray是一个通用的同构数据多维容器, 也就是说**所有元素的类型必须相同** 同时,每个数组都有一个shape(各维大小的元组)属性和一个dtype(用于说明数组数据类型的对象) ''' arr = [1, '2', 'a', 'c'] d = np.array(arr) # 会把数组中不同类型的元素转换成相同类型 print(d) ''' output: ['1' '2' 'a' 'c'] ''' print(d.shape, d.dtype) ''' output: (4,) <U21 ''' data2 = [[1, 2, 3, 4], [5, 6, 7, 8]] # 创建多维数组 arr2 = np.array(data2) # 转换成多维 print(arr2.ndim, arr2.shape) ''' output: 2 (2, 4) ''' np.zeros(10) # 创建一维10个数据都为0的数组 np.zeros((3, 6)) # 创建一个3*6数据都为0的数组 np.empty((2, 3, 2)) # 创建2*3*2的数组 ''' 创建数组函数 array 将输入数据转换为ndarray. asarray 将输入数据转换为ndarray. arange 类似内置range.返回ndarray ones, 根据指定的形状和dtype创建一个全1的数组. ones_like ones_like以另一个数组为参数, 并根据器形状和dtype创建一个全1的数组 zeros zeros_like 类似上一个 empty empty_like 同上 eye, identity 穿件一个N*N单位矩阵(对角线为1, 其余为0) ''' # ndarray的type可以显示转换 array = np.array([1, 2, 3, 4, 5]) float_arr = array.astype(np.float64) # 整形转换为浮点型 print(float_arr.dtype) ''' output: float64 ''' array = np.array([3.7, -1.2, -2.6, 0.5, 12.9, 10.1]) array = array.astype(np.int32) # 浮点型转换为整型(小数部分被截断) print(array) ''' output: [ 3 -1 -2 0 12 10] ''' ''' ### 数组与标量之间的运算 数组可以不用编写循环就可以对数据进行批量运算(也就是矢量化). 大小相同的数组运算会将运算应用到元素级 ''' arr = np.array([[1., 2., 3.], [4., 5., 6.]]) print(arr) ''' output: [[ 1. 2. 3.] [ 4. 5. 6.]] ''' print(arr * arr) ''' output: [[ 1. 4. 9.] [ 16. 25. 36.]] ''' # 同样的,数组与标量间的计算也会传播到元素级(不同大小的数组运算叫广播) print(1 / arr) ''' output: [[ 1. 
0.5 0.33333333] [ 0.25 0.2 0.16666667]] ''' ''' ## 基本的切片与索引 类似于python内置的list和tuple ### 花式索引 花式索引是numpy的术语, 他是利用整数数组进行索引 ''' arr = np.empty((8, 4)) for i in range(8): arr[i] = i print(arr) ''' output: [[ 0. 0. 0. 0.] [ 1. 1. 1. 1.] [ 2. 2. 2. 2.] [ 3. 3. 3. 3.] [ 4. 4. 4. 4.] [ 5. 5. 5. 5.] [ 6. 6. 6. 6.] [ 7. 7. 7. 7.]] ''' print(arr[[4, 3, 0, 6, 5]]) # 选取索引为4, 3, 0, 6, 5的行 ''' output: [[ 4. 4. 4. 4.] [ 3. 3. 3. 3.] [ 0. 0. 0. 0.] [ 6. 6. 6. 6.] [ 5. 5. 5. 5.]] ''' print(arr[[-1, -5]]) ''' output: [[ 7. 7. 7. 7.] [ 3. 3. 3. 3.]] ''' # 一次性传入多个索引会返回一个一维数组 arr = np.empty((8, 4)) for i in range(1, 9): for j in range(1, 5): arr[i-1][j-1] = i * j print(arr) ''' output: [[ 1. 2. 3. 4.] [ 2. 4. 6. 8.] [ 3. 6. 9. 12.] [ 4. 8. 12. 16.] [ 5. 10. 15. 20.] [ 6. 12. 18. 24.] [ 7. 14. 21. 28.] [ 8. 16. 24. 32.]] ''' print(arr[[1, 5, 7, 2], [0, 3, 1, 2]]) # 返回(1, 0), (5, 3), (7,1), (2, 2)这四个元素 # output: [ 2. 24. 16. 9.] print(arr[[1, 5, 7, 2]][:, [0, 3, 1, 2]]) # 以返回(1, 0), (1, 3), (1, 1), (1, 2) # (5, 0), ... # (7, 0), ... # (2, 0), ... # 4*4的数组 ''' output: [[ 2. 8. 4. 6.] [ 6. 24. 12. 18.] [ 8. 32. 16. 24.] [ 3. 12. 6. 9.]] ''' arr_ix = arr[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])] print(arr_ix) ''' output: [[ 2. 8. 4. 6.] [ 6. 24. 12. 18.] [ 8. 32. 16. 24.] [ 3. 12. 6. 
9.]] ''' ''' numpy可以简化数据处理,避免了python写循环,用数组表达式代替循环的做法一般叫矢量化 一般来说,矢量化的运算要比纯Python的循环快上1-2个数量级 在一组树枝上计算(sqrt(x^2 + y^2)) ''' ''' from matplotlib.pyplot import imshow, title import matplotlib.pyplot as plt points = np.arange(-5, 5, 0.01) xs, ys = np.meshgrid(points, points) # np.meshgrid函数接受2个一维5数组,并产生2个二维矩阵(分别对应x和y) z = np.sqrt(xs ** 2 + ys ** 2) plt.imshow(z, cmap=plt.cm.gray) plt.colorbar() ''' # plt.show() # 将条件逻辑表述为数组运算 # numpy.where函数是三元表达式x if condition else y的矢量版本 xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5]) yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5]) cond = np.array([True, False, True, True, False]) # 需要判断cond中的值为True时选取xarr, 为False时选取yarr result = [(x if c else y) for x, y, c in zip(xarr, yarr, cond)] # python版本 result = np.where(cond, xarr, yarr) print(result) ''' output: [ 1.1 2.2 1.3 1.4 2.5] ''' # np.where的第二个和第三个参数不一定是数组,它们可以是标量值. # 在数据分析中,where通常用于根据一个数组产生另一个数组. # 假设有一个由随机数数据组成的矩阵,希望把所有大于0的值替换为2, 小于0的值替换为-2 arr =
randn(4, 4)
numpy.random.randn
"""CEC2008 benchmarking functions. """ import numpy as np import opytimark.utils.decorator as d from opytimark.core import CECBenchmark # Fixes Numpy's random seed np.random.seed(0) class F1(CECBenchmark): """F1 class implements the Shifted Sphere's benchmarking function. .. math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = \sum_{i=1}^{n} z_i^2 - 450 \mid z_i = x_i - o_i Domain: The function is commonly evaluated using :math:`x_i \in [-100, 100] \mid i = \{1, 2, \ldots, n\}, n \leq 1000`. Global Minima: :math:`f(\mathbf{x^*}) = -450 \mid \mathbf{x^*} = \mathbf{o}`. """ def __init__(self, name='F1', year='2008', auxiliary_data=('o'), dims=1000, continuous=True, convex=True, differentiable=True, multimodal=False, separable=True): """Initialization method. Args: name (str): Name of the function. year (str): Year of the function. auxiliary_data (tuple): Auxiliary variables to be externally loaded. dims (int): Number of allowed dimensions. continuous (bool): Whether the function is continuous. convex (bool): Whether the function is convex. differentiable (bool): Whether the function is differentiable. multimodal (bool): Whether the function is multimodal. separable (bool): Whether the function is separable. """ super(F1, self).__init__(name, year, auxiliary_data, dims, continuous, convex, differentiable, multimodal, separable) @d.check_less_equal_dimension def __call__(self, x): """This method returns the function's output when the class is called. Args: x (np.array): An input array for calculating the function's output. Returns: The benchmarking function output `f(x)`. """ # Re-calculates the input z = x - self.o[:x.shape[0]] # Calculating the Shifted Sphere's function f = z ** 2 return np.sum(f) - 450 class F2(CECBenchmark): """F2 class implements the Shifted Schwefel's 2.21 benchmarking function. .. 
math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = \max_{i=1, \ldots, n}|z_i| - 450 \mid z_i = x_i - o_i Domain: The function is commonly evaluated using :math:`x_i \in [-100, 100] \mid i = \{1, 2, \ldots, n\}, n \leq 1000`. Global Minima: :math:`f(\mathbf{x^*}) = -450 \mid \mathbf{x^*} = \mathbf{o}`. """ def __init__(self, name='F2', year='2008', auxiliary_data=('o'), dims=1000, continuous=True, convex=True, differentiable=True, multimodal=False, separable=False): """Initialization method. Args: name (str): Name of the function. year (str): Year of the function. auxiliary_data (tuple): Auxiliary variables to be externally loaded. dims (int): Number of allowed dimensions. continuous (bool): Whether the function is continuous. convex (bool): Whether the function is convex. differentiable (bool): Whether the function is differentiable. multimodal (bool): Whether the function is multimodal. separable (bool): Whether the function is separable. """ super(F2, self).__init__(name, year, auxiliary_data, dims, continuous, convex, differentiable, multimodal, separable) @d.check_less_equal_dimension def __call__(self, x): """This method returns the function's output when the class is called. Args: x (np.array): An input array for calculating the function's output. Returns: The benchmarking function output `f(x)`. """ # Re-calculates the input z = x - self.o[:x.shape[0]] # Calculating the Schwefel's 2.21 function f = np.fabs(z) return np.amax(f) - 450 class F3(CECBenchmark): """F3 class implements the Shifted Rosenbrock's benchmarking function. .. math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = \sum_{i=1}^{n-1} (100(z_i^2-z_{i+1})^2 + (z_i - 1)^2) + 390 \mid z_i = x_i - o_i Domain: The function is commonly evaluated using :math:`x_i \in [-100, 100] \mid i = \{1, 2, \ldots, n\}, n \leq 1000`. Global Minima: :math:`f(\mathbf{x^*}) = -390 \mid \mathbf{x^*} = \mathbf{o} + 1`. 
""" def __init__(self, name='F3', year='2008', auxiliary_data=('o'), dims=1000, continuous=True, convex=True, differentiable=True, multimodal=True, separable=False): """Initialization method. Args: name (str): Name of the function. year (str): Year of the function. auxiliary_data (tuple): Auxiliary variables to be externally loaded. dims (int): Number of allowed dimensions. continuous (bool): Whether the function is continuous. convex (bool): Whether the function is convex. differentiable (bool): Whether the function is differentiable. multimodal (bool): Whether the function is multimodal. separable (bool): Whether the function is separable. """ super(F3, self).__init__(name, year, auxiliary_data, dims, continuous, convex, differentiable, multimodal, separable) @d.check_less_equal_dimension def __call__(self, x): """This method returns the function's output when the class is called. Args: x (np.array): An input array for calculating the function's output. Returns: The benchmarking function output `f(x)`. """ # Re-calculates the input z = x - self.o[:x.shape[0]] # Instantiating function f = 0 # For every input dimension for i in range(x.shape[0] - 1): # Calculating the Shifted Rosenbrock's function f += (100 * (z[i] ** 2 - z[i+1]) ** 2 + (z[i] - 1) ** 2) return f + 390 class F4(CECBenchmark): """F4 class implements the Shifted Rastrigin's benchmarking function. .. math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = \sum_{i=1}^{n} (z_i^2 - 10cos(2 \\pi z_i) + 10) - 330 \mid z_i = x_i - o_i Domain: The function is commonly evaluated using :math:`x_i \in [-5, 5] \mid i = \{1, 2, \ldots, n\}, n \leq 1000`. Global Minima: :math:`f(\mathbf{x^*}) = -330 \mid \mathbf{x^*} = \mathbf{o}`. """ def __init__(self, name='F4', year='2008', auxiliary_data=('o'), dims=1000, continuous=True, convex=True, differentiable=True, multimodal=True, separable=True): """Initialization method. Args: name (str): Name of the function. year (str): Year of the function. 
auxiliary_data (tuple): Auxiliary variables to be externally loaded. dims (int): Number of allowed dimensions. continuous (bool): Whether the function is continuous. convex (bool): Whether the function is convex. differentiable (bool): Whether the function is differentiable. multimodal (bool): Whether the function is multimodal. separable (bool): Whether the function is separable. """ super(F4, self).__init__(name, year, auxiliary_data, dims, continuous, convex, differentiable, multimodal, separable) @d.check_less_equal_dimension def __call__(self, x): """This method returns the function's output when the class is called. Args: x (np.array): An input array for calculating the function's output. Returns: The benchmarking function output `f(x)`. """ # Re-calculates the input z = x - self.o[:x.shape[0]] # Calculating the Shifted Rastrigin's function f = z ** 2 - 10 * np.cos(2 * np.pi * z) + 10 return np.sum(f) - 330 class F5(CECBenchmark): """F5 class implements the Shifted Griewank's benchmarking function. .. math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = 1 + \sum_{i=1}^{n}\\frac{x_i^2}{4000} - \prod cos(\\frac{x_i}{\sqrt{i}}) - 180 \mid z_i = x_i - o_i Domain: The function is commonly evaluated using :math:`x_i \in [-600, 600] \mid i = \{1, 2, \ldots, n\}, n \leq 1000`. Global Minima: :math:`f(\mathbf{x^*}) = -180 \mid \mathbf{x^*} = \mathbf{o}`. """ def __init__(self, name='F5', year='2008', auxiliary_data=('o'), dims=1000, continuous=True, convex=True, differentiable=True, multimodal=True, separable=False): """Initialization method. Args: name (str): Name of the function. year (str): Year of the function. auxiliary_data (tuple): Auxiliary variables to be externally loaded. dims (int): Number of allowed dimensions. continuous (bool): Whether the function is continuous. convex (bool): Whether the function is convex. differentiable (bool): Whether the function is differentiable. multimodal (bool): Whether the function is multimodal. 
separable (bool): Whether the function is separable. """ super(F5, self).__init__(name, year, auxiliary_data, dims, continuous, convex, differentiable, multimodal, separable) @d.check_less_equal_dimension def __call__(self, x): """This method returns the function's output when the class is called. Args: x (np.array): An input array for calculating the function's output. Returns: The benchmarking function output `f(x)`. """ # Re-calculates the input z = x - self.o[:x.shape[0]] # Initializing terms term1, term2 = 0, 1 # For every possible dimension of `x` for i in range(x.shape[0]): # Calculating first term term1 += (z[i] ** 2) / 4000 # Calculating second term term2 *= np.cos(z[i] /
np.sqrt(i + 1)
numpy.sqrt
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon May 6 10:37:16 2019 @authors: <NAME>, <NAME>, <NAME> File Description: The utility file used to load and preprocess data, format results, and save results. """ # Libraries from time import time from os.path import exists, join from os import mkdir from numpy import mean, std, sum, min, delete from pandas import read_csv, concat # Hyper-Pareameters / CONSTANTS SRC_NULL = 100 # Original Null Value DST_NULL = -98 # Changed Null Value MIN_WAPS = 9 # Minimum number of WAPS per sample. def load_data(train_fname, val_fname, N, drop_columns=None, dst_null=DST_NULL, drop_val=False): ''' Loads both the training and validation data (if drop_val is False), concatenates the datasets into one dataset. Splits the dataset into data and labels (X and Y). Replaces Null values and sets all lower null values to the replaced value. Normalizes data between 0 and 1 where 0 is weak intensity and 1 is strong intensity. Parameters: train_fname : (str) file name of training data - *.csv val_fname : (str) file name of validation data - *.csv N : (int) number of features drop_columns : (list) column names to be removed from data dst_null : (int) the value to change all null values to drop_val : (boolean) if true then drops validation data Returns : x_train : (Dataframe) training data y_train : (Dataframe) training labels x_test : (Dataframe) test data y_test : (Dataframe) test labels ''' tic = time() # Start function performance timer if drop_val: data = read_csv("data/" + train_fname) else: training_data = read_csv("data/" + train_fname) validation_data = read_csv("data/" + val_fname) data = concat((training_data, validation_data), ignore_index=True) if drop_columns: # Drop useless columns if there are any specified. data.drop(columns=drop_columns, inplace=True) data = data[data.PHONEID != 17] # Phone 17s data is clearly corrupted. 
# Split data from labels X = data.iloc[:, :N] Y = data.iloc[:, N:] # Change null value to new value and set all lower values to it. X.replace(SRC_NULL, dst_null, inplace=True) X[X < dst_null] = dst_null # Remove samples that have less than MIN_WAPS active WAPs # Normalize data between 0 and 1 where 1 is strong signal and 0 is null X /= min(X) X = 1 - X toc = time() # Report function performance timer print("Data Load Timer: %.2f seconds" % (toc-tic)) return X, Y def filter_out_low_WAPS(data, labels, num_samples=MIN_WAPS): ''' Removes samples from the data that do not contain at least MIN_WAPS of non-null intensities. Parameters: data : (ndarray) 2D array for WAP intensities labels : (ndarray) 2D array for labels num_samples : (int) the mim required number of non-null values Returns: new_data : (ndarray) 2D array for WAP intensities new_labels : (ndarray) 2D array for labels ''' drop_rows = list() for i, x in enumerate(data): count =
sum(x != DST_NULL)
numpy.sum
#!/usr/bin/python3 # Copyright (C) 2020 Intel Corporation from html import escape from urllib.parse import parse_qs from flup.server.fcgi import WSGIServer import threading import json import base64 import os import time import sys import wave import datetime import numpy as np import ctypes import inferservice_python as rt_api from fcgi_codec import CTCCodec import cv2 from shapely.geometry import Polygon import pyclipper import math import copy import logging import logging.handlers import socket syslog = logging.handlers.SysLogHandler(address='/dev/log') msgfmt = '%(asctime)s {0} %(name)s[%(process)d]: %(message)s'.format(socket.gethostname()) formatter = logging.Formatter(msgfmt, datefmt='%b %d %H:%M:%S') syslog.setFormatter(formatter) logger = logging.getLogger(os.path.basename(sys.argv[0])) logger.addHandler(syslog) logger.setLevel(logging.DEBUG) class formula(): def __init__(self, vocab_file): assert vocab_file.endswith(".json"), "vocab file must be json file" with open(vocab_file, "r") as file: dict_vocab = json.load(file) dict_vocab['id2sign'] = {int(i): j for i, j in dict_vocab['id2sign'].items()} self.index = dict_vocab["id2sign"] def get_formula(self, targets):#get latex formula from index phrase_formula = [] for target in targets: if target == 2: break phrase_formula.append( self.index.get(target, "?")) return " ".join(phrase_formula) def latex_preprocess_image( image_raw, tgt_shape): img_h, img_w = image_raw.shape[0:2] target_height, target_width = tgt_shape new_h = min(target_height, img_h) new_w = min(target_width, img_w) image_raw=image_raw[:new_h, :new_w, :] image = cv2.copyMakeBorder(image_raw, 0, target_height - img_h, 0, target_width - img_w, cv2.BORDER_CONSTANT, None, (255,255,255)) return image def latex_recognizer(latex_crop_list, latex_encode_xml, latex_decode_xml, urlinfo,vocab, formula_result_list): for img in latex_crop_list: img_decode = formula.latex_preprocess_image(img, (160, 1400)) img_encode = cv2.imencode('.jpg', img_decode)[1] 
pic = list(img_encode) pics = [pic] other_pin = rt_api.vectorVecFloat() #prepare out dec_states_h = rt_api.vectorFloat() dec_states_c = rt_api.vectorFloat() output = rt_api.vectorFloat() row_enc_out = rt_api.vectorFloat() out = rt_api.vectorVecFloat() out.append(dec_states_h) out.append(dec_states_c) out.append(output) out.append(row_enc_out) res = rt_api.infer_image(pics, 3, other_pin, latex_encode_xml, out, urlinfo) logits = [] if res == 0: max_formula_len = 128 dec_states_h = out[0] dec_states_c = out[1] output = out[2] row_enc_out = out[3] tgt = [[[0]]] for _ in range(max_formula_len): decode_model = latex_decode_xml other_pin = rt_api.vectorVecFloat() other_pin.append(rt_api.vectorFloat(dec_states_c)) other_pin.append(rt_api.vectorFloat(output)) other_pin.append(rt_api.vectorFloat(row_enc_out)) other_pin.append(rt_api.vectorFloat([tgt[0][0][0]])) decode_out= rt_api.vectorVecFloat() decode_out1 = rt_api.vectorFloat() decode_out2 = rt_api.vectorFloat() decode_out3 = rt_api.vectorFloat() decode_out4 = rt_api.vectorFloat() decode_out.append(decode_out1) decode_out.append(decode_out2) decode_out.append(decode_out3) decode_out.append(decode_out4) input_data = rt_api.vectorVecFloat() x_pin1 = rt_api.vectorFloat(dec_states_h) input_data.append(x_pin1) input_vecs = rt_api.tripleVecFloat() input_vecs.append(input_data) res = rt_api.infer_common(input_vecs, other_pin, decode_model, "OPENVINO", decode_out, urlinfo) dec_states_h = decode_out[0] dec_states_c = decode_out[1] output = decode_out[3] logit = np.array(decode_out[2]).reshape(1,101) logits.append(logit) tgt = np.array([[np.argmax(logit, axis=1)]]) tgt = tgt.tolist() if tgt[0][0][0] == 2: break logits = np.array(logits) logits = logits.squeeze(axis=1) targets = np.argmax(logits, axis=1) formula_result = vocab.get_formula(targets) formula_result_list.append(formula_result) class chinese_handwritten(): def get_characters(charlist): '''Get characters''' with open(charlist, 'r', encoding='utf-8') as f: return 
''.join(line.strip('\n') for line in f) def handwritten_image_preprocess(image, height, width): image_ratio = float(image.shape[1]) / float(image.shape[0]) rw = int(height * image_ratio) if rw <= 2000: resized_image = cv2.resize(image, (rw, height), interpolation=cv2.INTER_AREA).astype(np.float32) resized_img = resized_image[None, :, :] _, rh, rw = resized_img.shape pad_resized_img = np.pad(resized_img, ((0, 0), (0, height - rh), (0, width - rw)), mode='edge') else: image_ratio = width / image.shape[1] rh = int(image.shape[0] * image_ratio) resized_img = cv2.resize(image, (width, rh) , interpolation=cv2.INTER_AREA).astype(np.float32) resized_img = resized_img[None, :, :] _, rh, rw = resized_img.shape pad_resized_img = np.pad(resized_img, ((0, 0), (0, height - rh), (0, width - rw)), mode='edge') return pad_resized_img def handwritten_recognizer(handwritten_crop_list, model_xml, model_label, urlinfo, handwritten_result_list): for img in handwritten_crop_list: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img = chinese_handwritten.handwritten_image_preprocess(img, 96, 2000) img = img[0] image_data = cv2.imencode('.jpg',img)[1] pic = list(image_data) pics = [pic] other_pin = rt_api.vectorVecFloat() out1 = rt_api.vectorFloat() out = rt_api.vectorVecFloat() ##with opaque and stl_binding out.append(out1) res = rt_api.infer_image(pics, 3, other_pin, model_xml, out, urlinfo) if res == 0: char_label = chinese_handwritten.get_characters(model_label) code_ocr = CTCCodec(char_label, 20) predict = np.array(out[0]) predict = predict.reshape(186,1,4059) result = code_ocr.decode(predict) handwritten_result_list.append(result[0]) class ppocr(): def small_rectangle(contour_img): rectangle = cv2.minAreaRect(contour_img) left_top, right_top, right_down, left_down = 0, 1, 2, 3 box_points = sorted(list(cv2.boxPoints(rectangle)), key=lambda x: x[0]) if box_points[3][1] > box_points[2][1]: right_top = 2 right_down = 3 else: right_top = 3 right_down = 2 if box_points[1][1] > box_points[0][1]: 
left_top = 0 left_down = 1 else: left_top = 1 left_down = 0 rectangle_points = [box_points[left_top], box_points[right_top], box_points[right_down], box_points[left_down]] return rectangle_points, min(rectangle[1]) def rectangle_score(bit_img, _rectangle): rectangle = _rectangle.copy() h, w = bit_img.shape[:2] w_min = np.clip(np.floor(rectangle[:, 0].min()).astype(np.int), 0, w - 1) h_min = np.clip(np.floor(rectangle[:, 1].min()).astype(np.int), 0, h - 1) w_max = np.clip(np.ceil(rectangle[:, 0].max()).astype(np.int), 0, w - 1) h_max = np.clip(np.ceil(rectangle[:, 1].max()).astype(np.int), 0, h - 1) rectangle[:, 0] = rectangle[:, 0] - w_min rectangle[:, 1] = rectangle[:, 1] - h_min mask_img = np.zeros((h_max - h_min + 1, w_max - w_min + 1), dtype=np.uint8) cv2.fillPoly(mask_img, rectangle.reshape(1, -1, 2).astype(np.int32), 1) return cv2.mean(bit_img[h_min:h_max + 1, w_min:w_max + 1], mask_img)[0] def large_rectangle(rectangle): enlarge_ratio = 1 pco = pyclipper.PyclipperOffset() poly = Polygon(rectangle) length = poly.length area = poly.area ratio = area * enlarge_ratio / length pco.AddPath(rectangle, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) expanded_box = np.array(pco.Execute(ratio)) return expanded_box def bit_img_boxes(predict, _bit_img, ori_width, ori_height): max_candidates = 1000 bit_img = _bit_img height, width = bit_img.shape contours = cv2.findContours((bit_img * 255).astype(np.uint8), cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) contours_num = len(contours) if contours_num == 2: new_contours, _ = contours[0], contours[1] elif contours_num == 3: img, new_contours, _ = contours[0], contours[1], contours[2] contours_num = min(contours_num, max_candidates) boxes =[] box_scores =[] for contour in new_contours: box, s_height = ppocr.small_rectangle(contour) min_size = 3 if s_height < min_size: continue box = np.array(box) box_score = ppocr.rectangle_score(predict, box.reshape(-1, 2)) if box_score < 0.5: continue large_box = 
ppocr.large_rectangle(box).reshape(-1, 1, 2) large_box, s_height = ppocr.small_rectangle(large_box) if s_height < min_size+2: continue large_box = np.array(large_box) w = np.round(large_box[:, 0] / width * ori_width) h = np.round(large_box[:, 1] / height * ori_height) large_box[:, 0] = np.clip(w, 0, ori_width) large_box[:, 1] = np.clip(h, 0, ori_height) boxes.append(large_box.astype(np.int16)) box_scores.append(box_score) boxes = np.array(boxes, dtype=np.int16) return boxes, box_scores def get_text_img(ori_img, box): top_width = np.linalg.norm(box[0] - box[1]) down_width = np.linalg.norm(box[2] - box[3]) crop_img_width = int(max(top_width, down_width)) left_height = np.linalg.norm(box[0] - box[3]) right_height = np.linalg.norm(box[1] - box[2]) crop_img_height = int(max(left_height,right_height)) points_std = np.float32([[0, 0], [crop_img_width, 0], [crop_img_width, crop_img_height], [0, crop_img_height]]) box = box.astype(np.float32) M = cv2.getPerspectiveTransform(box, points_std) text_img = cv2.warpPerspective( ori_img, M, (crop_img_width, crop_img_height), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC) text_img_height, text_img_width = text_img.shape[0:2] if text_img_height * 1.0 / text_img_width >= 1.5: text_img = np.rot90(text_img) return text_img def boxes_sorted(boxes): number = boxes.shape[0] dt_boxes = sorted(boxes, key=lambda x: (x[0][1], x[0][0])) dt_boxes = list(dt_boxes) for i in range(number - 1): if abs(dt_boxes[i + 1][0][1] - dt_boxes[i][0][1]) < 10 and \ (dt_boxes[i][0][0] > dt_boxes[i + 1][0][0]): tmp_box = dt_boxes[i] dt_boxes[i] = dt_boxes[i + 1] dt_boxes[i + 1] = tmp_box return dt_boxes def det_postprocess(out, ori_img): thresh = 0.5 boxes_batch = [] seg_img = np.array(out[0]) seg_img = seg_img.reshape(640,640,1) predict = seg_img.reshape([1, seg_img.shape[0], seg_img.shape[1]]) seg_matrix = predict > thresh kernel = np.array([[1, 1], [1, 1]]) for index in range(predict.shape[0]): src = np.array(seg_matrix[index]).astype(np.uint8) 
mask_img = cv2.dilate(src, kernel) boxes, box_score = ppocr.bit_img_boxes(predict[index], mask_img, ori_img.shape[1], ori_img.shape[0]) sorted_boxes = ppocr.boxes_sorted(boxes) crop_img_list = [] for index in range(len(sorted_boxes)): tmp_box = copy.deepcopy(sorted_boxes[index]) crop_img = ppocr.get_text_img(ori_img, tmp_box) crop_img_list.append(crop_img) return crop_img_list, sorted_boxes def text_detection(pics, model_xml, urlinfo): other_pin = rt_api.vectorVecFloat() out1 = rt_api.vectorFloat() out = rt_api.vectorVecFloat() out.append(out1) res = rt_api.infer_image(pics, 3, other_pin, model_xml, out, urlinfo) return res, out def resize_rec_img(img, max_wh_ratio): length = round(img.shape[1] / 100) * 100 if length == 0: length = 100 if length > 1200: length = 1200 dest_channel, dest_height, dest_width = [3, 32, length] resized_image = cv2.resize(img, (dest_width, dest_height)) resized_image = resized_image.transpose((2, 0, 1)) pad_img = np.zeros((dest_channel, dest_height, dest_width), dtype=np.float32) pad_img[:, :, 0:dest_width] = resized_image return pad_img def text_decode(preds_index, preds_prob=None, delete_duplicates=False): label_path = "./models/ppocr/ppocr_keys_v1.txt" use_space_char = True number = len(preds_index) label_str = "" with open(label_path, "rb") as f: lines = f.readlines() for line in lines: line = line.decode('utf-8').strip("\n") line = line.strip("\r\n") label_str += line if use_space_char: label_str += " " dict_label = list(label_str) dict_label = ['blank'] + dict_label ignored_characters = [0]#for ctc blank results = [] for index in range(number): characters = [] confidences = [] for idx in range(len(preds_index[index])): if delete_duplicates and idx > 0 and preds_index[index][idx - 1] == preds_index[index][idx]: continue if preds_index[index][idx] in ignored_characters: continue characters.append(dict_label[int(preds_index[index][idx])]) if preds_prob is not None: confidences.append(preds_prob[index][idx]) else: confidences.append(1) 
result = ''.join(characters) results.append((result,
np.mean(confidences)
numpy.mean
import os import numpy as np from bridge_evaluation_utils import read_text_lines, read_file_data, generate_depth_map, get_focal_length_baseline eigen_test_filename_path = '/home2/Dave/pytorch/athena/utils/filenames/eigen_test_files.txt' dataset_path = '/home2/Dave/downloads/kitti_raw/' eigen_test_gt_save_path = '/home2/Dave/pytorch/athena/test_gt/eigen/' eigen_test_gt_depth_save_path = eigen_test_gt_save_path + 'depths/' def generate_eigen_depth_gt(): ''' generate eigen split depth ground truth using the projected point cloud. also save imgage size, focal length and baseline :return: ''' if not os.path.exists(eigen_test_gt_save_path): os.makedirs(eigen_test_gt_save_path) if not os.path.exists(eigen_test_gt_depth_save_path): os.makedirs(eigen_test_gt_depth_save_path) n_samples = 697 eigen_test_files = read_text_lines(eigen_test_filename_path) gt_velo, gt_calib, im_sizes, im_files, cams = read_file_data(eigen_test_files, dataset_path) assert n_samples == len(gt_velo) focal_length = np.zeros((697, 1), dtype=np.float32) baseline = np.zeros((697, 1), dtype=np.float32) img_size = np.zeros((697, 2), dtype=np.float32) for idx in range(n_samples): camera_id = cams[idx] # 2 is left, 3 is right depth = generate_depth_map(gt_calib[idx], gt_velo[idx], im_sizes[idx], camera_id, False, True) np.save(eigen_test_gt_depth_save_path + 'depth_{}.npy'.format(idx), depth) fl, bl = get_focal_length_baseline(gt_calib[idx], camera_id) focal_length[idx] = fl baseline[idx] = bl img_size[idx] =
np.asarray(im_sizes[idx])
numpy.asarray
import numpy as np import dart_board import pybse from dart_board import sf_history from dart_board import constants as c # Load data chains = np.load("../data/J0513_evidence_chain.npy") derived =
np.load("../data/J0513_evidence_derived.npy")
numpy.load
import itertools import math import sys import numpy as np import numpy.linalg from pychemia import Structure, pcm_log from pychemia.utils.mathematics import integral_gaussian from pychemia.utils.periodic import atomic_number, covalent_radius, valence class StructureAnalysis: """ The set of analysis provided by this class uses only structural information of one single structure. The kind of analysis includes coordination numbers, bonds, distances, hardness, fingerprints. Most of those analysis rely on a robust computation of inter-atomic distances. This class uses lazy evaluation for the distances and bonds to lower cpu and memory footprint. """ def __init__(self, structure, supercell=(1, 1, 1), radius=50): """ Takes one pychemia Structure object and will create a StructureAnalysis object. This object is computes the distances between all atoms in the unit cell with replicas extending up to a distance given by 'radius'. A supercell could be needed in cases where full connectivity of the entire crystal is needed. 
:param structure: A pychemia Structure object :param supercell: A supercell to be created from the Structure (Default=(1,1,1)) :param radius: Maximal distance computed between two atoms """ assert (isinstance(structure, Structure)) if supercell != (1, 1, 1): self.structure = structure.supercell(supercell) else: self.structure = structure.copy() self._distances = None self._all_distances = None self._pairs = None self._supercell = supercell self._radius = radius # log.debug('Supercell : ' + str(self._supercell)) # log.debug('Radius : %7.2f' % self._radius) @property def radius(self): return self._radius @radius.setter def radius(self, value): assert (value > 0.0) if value != self._radius: self._distances = None self._pairs = None self._all_distances = None self._radius = value @property def distances(self): return self._distances def close_distances(self): """ Computes the closest distances for all the atoms :return: (tuple) Return a bond's dictionary and distance's list """ if self._pairs is None or self._distances is None: pcm_log.debug('Computing distances from scratch...') pairs_dict = {} distances_list = [] index = 0 for i, j in itertools.combinations(range(self.structure.natom), 2): if index % 100 == 0: pcm_log.debug('Computing distance between atoms %d and %d' % (i, j)) ret = self.structure.lattice.distance2(self.structure.reduced[i], self.structure.reduced[j], radius=self.radius) for k in ret: if str(i) not in pairs_dict: pairs_dict[str(i)] = [index] else: pairs_dict[str(i)].append(index) if str(j) not in pairs_dict: pairs_dict[str(j)] = [index] else: pairs_dict[str(j)].append(index) ret[k]['pair'] = (i, j) distances_list.append(ret[k]) index += 1 for i in range(self.structure.natom): ret = self.structure.lattice.distance2(self.structure.reduced[i], self.structure.reduced[i]) for k in ret: if str(i) not in pairs_dict: pairs_dict[str(i)] = [index] else: pairs_dict[str(i)].append(index) ret[k]['pair'] = (i, i) distances_list.append(ret[k]) index += 1 
self._pairs = pairs_dict self._distances = distances_list return self._pairs, self._distances def all_distances(self): if self._all_distances is None: ret = {} for i, j in itertools.combinations_with_replacement(range(self.structure.natom), 2): pair = (i, j) ret[pair] = self.structure.lattice.distances_in_sphere(self.structure.reduced[i], self.structure.reduced[j], radius=self.radius) self._all_distances = ret return self._all_distances def all_distances_by_species(self): all_distances = self.all_distances() ret = {} symbols_indexed = np.array([self.structure.species.index(x) for x in self.structure.symbols]) # print self.structure.symbols for ipair in all_distances: key = tuple(sorted(symbols_indexed[np.array(ipair)])) # print ipair, key if key in ret: ret[key] = np.concatenate((ret[key], all_distances[ipair]['distance'])) else: ret[key] = all_distances[ipair]['distance'].copy() # Sorting arrays for key in ret: ret[key].sort() return ret def structure_distances(self, delta=0.01, sigma=0.01, integrated=True): dist_spec = self.all_distances_by_species() discrete_rdf = {} nbins = int((self.radius + 5 * delta) / delta) discrete_rdf_x = np.arange(0, nbins * delta, delta) for spec_pair in dist_spec: discrete_rdf[spec_pair] = np.zeros(nbins) positive_distances = dist_spec[spec_pair][dist_spec[spec_pair] > 0] # log.debug('Pair %s' % str(spec_pair)) for Rij in positive_distances: # Integrating for bin from - 8*sigma to +8*sigma centered on Rij # Values outside this range are negligible imin = int(max(0, (Rij - 8 * sigma) / delta)) imax = int(min(len(discrete_rdf_x), (Rij + 8 * sigma) / delta)) # log.debug('Computing for distance %7.3f for indices between %d and %d' % (Rij, imin, imax)) for i in range(imin, imax): x = discrete_rdf_x[i] if not integrated: discrete_rdf[spec_pair][i] += np.exp(-((x - Rij) ** 2) / (2 * sigma * sigma)) / ( 4 * math.pi * Rij * Rij) else: discrete_rdf[spec_pair][i] += integral_gaussian(x, x + delta, Rij, sigma) / ( 4 * math.pi * Rij * Rij) return 
discrete_rdf_x, discrete_rdf def fp_oganov(self, delta=0.01, sigma=0.01): struc_dist_x, struc_dist = self.structure_distances(delta=delta, sigma=sigma) fp_oganov = struc_dist.copy() vol = self.structure.volume for spec_pair in struc_dist: for i in range(len(struc_dist[spec_pair])): specie0=self.structure.species[spec_pair[0]] specie1=self.structure.species[spec_pair[1]] number_atoms0 = self.structure.composition[specie0] number_atoms1 = self.structure.composition[specie1] fp_oganov[spec_pair][i] *= vol / (delta * number_atoms0 * number_atoms1) fp_oganov[spec_pair][i] -= 1 return struc_dist_x, fp_oganov def bonds_coordination(self, initial_cutoff_radius=0.8, use_laplacian=True, jump=0.01, tol=1E-15): cutoff_radius = initial_cutoff_radius ad = self.all_distances() bonds = {} while True: laplacian = np.zeros((self.structure.natom, self.structure.natom), dtype=np.int8) for pair in ad: atom1 = self.structure.symbols[pair[0]] atom2 = self.structure.symbols[pair[1]] sum_covalent_radius = sum(covalent_radius([atom1, atom2])) condition = np.bitwise_and(ad[pair]['distance'] < cutoff_radius * sum_covalent_radius, ad[pair]['distance'] > 0) bonds[pair] = ad[pair]['distance'][condition] if len(bonds[pair]) > 0: laplacian[pair[0], pair[1]] = -1 laplacian[pair[1], pair[0]] = -1 for i in range(self.structure.natom): laplacian[i, i] = 0 laplacian[i, i] = -sum(laplacian[i]) if use_laplacian: if np.max(np.abs(laplacian)) == 0: cutoff_radius += jump ev = numpy.linalg.eigvalsh(laplacian) if sum(ev < tol) > 1: cutoff_radius += jump else: break else: break coordination = np.zeros(self.structure.natom, dtype=int) for pair in bonds: coordination[pair[0]] += len(bonds[pair]) coordination[pair[1]] += len(bonds[pair]) return bonds, coordination, round(cutoff_radius, 3) def get_bonds_coordination(self, initial_cutoff_radius=0.8, ensure_conectivity=False, use_laplacian=True, verbose=False, tol=1E-15, jump=0.01, use_jump=True): """ Computes simultaneously the bonds for all atoms and the 
coordination number using a multiplicative tolerance for the sum of covalent radius :param use_jump: :param jump: :param tol: :param verbose: :param use_laplacian: :param initial_cutoff_radius: (float) Tolerance factor (default is 1.2) :param ensure_conectivity: (bool) If True the tolerance of each bond is adjusted to ensure that each atom is connected at least once :return: tuple """ if verbose: print('Computing all distances...') bonds_dict, distances_list = self.close_distances() if verbose: print('Number of distances computed: ', len(distances_list)) cutoff_radius = initial_cutoff_radius bonds = None coordination = None tolerances = None while True: if verbose: print('Current cutoff radius : ', cutoff_radius) bonds = [] tolerances = [] for i in range(self.structure.natom): tole = cutoff_radius while True: tmp_bonds = [] min_proportion = sys.float_info.max for j in bonds_dict[str(i)]: atom1 = self.structure.symbols[distances_list[j]['pair'][0]] atom2 = self.structure.symbols[distances_list[j]['pair'][1]] sum_covalent_radius = sum(covalent_radius([atom1, atom2])) distance = distances_list[j]['distance'] if distance == 0.0: continue proportion = distance / sum_covalent_radius min_proportion = min(min_proportion, proportion) if proportion <= tole: tmp_bonds.append(j) if len(tmp_bonds) == 0 and ensure_conectivity: tole = min_proportion cutoff_radius = tole else: bonds.append(tmp_bonds) tolerances.append(min_proportion) break if use_laplacian: size = (self.structure.natom, self.structure.natom) laplacian = np.zeros(size, dtype=np.int8) for listibonds in bonds: for ibond in listibonds: data = distances_list[ibond] i = data['pair'][0] j = data['pair'][1] laplacian[i, j] = -1 laplacian[j, i] = -1 # print '%d %d' % (i,j) for i in range(self.structure.natom): laplacian[i, i] = 0 laplacian[i, i] = -sum(laplacian[i]) if verbose: print(laplacian) if np.max(
np.abs(laplacian)
numpy.abs
""" fitmaker.py written by <NAME> : <EMAIL> Collection of functions to run the fit (gtlike) the class FitMaker will call the function of the observation class (gtfunction.py) and prepare the fit by computing the fits file. it can distinguish between the binned and unbinnned analysis begun September 2011 """ #import logging #logging.basicConfig(level=logging.INFO) #log = logging.getLogger(__name__) import numpy as np import string try: import astropy.io.fits as fits except ImportError: import pyfits as fits from UnbinnedAnalysis import UnbinnedAnalysis, UnbinnedObs from BinnedAnalysis import BinnedAnalysis, BinnedObs, BinnedConfig from enrico import utils from enrico import Loggin from enrico import environ import pyLikelihood as pyLike class FitMaker(Loggin.Message): """Collection of functions to prepare/run the GTLIKE fit and compute an upper limit is needed""" def __init__(self, obs, config): super(FitMaker,self).__init__() Loggin.Message.__init__(self) self.obs = obs self.config = config self.task_number = 1 self.log_like = 0 def _log(self, task='', description=''): print() print(("\033[34m"+'# ' + '*' * 60)) if task: task = '%10s --- ' % task print(("\033[34m"+'# *** %3d %s%s' % (self.task_number, task, description))) print(("\033[34m"+'# ' + '*' * 60+"\033[0m")) self.task_number += 1 def FirstSelection(self,config=None): """Make a coarse selection of events from original file""" self._log('gtselect', 'Select data from library, coarse cut')#run gtselect if config!=None: self.obs.Configuration = config self.obs.LoadConfiguration() self.obs.FirstCut() self.obs.Configuration = self.config self.obs.LoadConfiguration() else: self.obs.FirstCut() def GenerateFits(self): """Run the different ST tools and compute the fits files First it runs the tools that are common to the binned and unbinned analysis chain then it run the specific tools following the choise of the user""" #Run the tools common to binned and unbinned chain self._log('gtselect', 'Select data from 
library, fine cut')#run gtselect self.obs.SelectEvents() self._log('gtmktime', 'Update the GTI and cut data based on ROI')#run gtmktime self.obs.MkTime() if (self.config["analysis"]["ComputeDiffrsp"] == "yes" and self.config["analysis"]["likelihood"] == "unbinned"): self._log('gtdiffrsp', 'Compute Diffuse response') self.obs.DiffResps()#run gtdiffresp self._log('gtbin', 'Create count maps (square fully embed in the ROI circle)') self.obs.Gtbin() self._log('gtltcube', 'Make live time cube')#run gtltcube self.obs.ExpCube() #Choose between the binned of the unbinned analysis if self.config['analysis']['likelihood'] == 'binned': #binned analysis chain self._log('gtbin', 'Make count map CCUBE')#run gtbin self.obs.GtCcube() self._log('gtexpcube2', 'Make binned exposure cube')#run gtexpcube2 self.obs.GtBinnedMap() self._log('gtsrcmap', 'Make a source map')#run gtsrcmap self.obs.SrcMap() self.info('Compute the psf')#run gtpsf self.obs.GtPSF() self.info('Compute Energy Migration Matrices')#run gtpsf self.obs.GtDRM() # the model should be generated for each component after optimizing. #self._log('gtmodel', 'Make a model map')#run gtmodel #self.obs.ModelMap(self.config["file"]["xml"]) if self.config['analysis']['likelihood'] == 'unbinned': #unbinned analysis chain self._log('gtexpmap', 'Make an exposure map') self.obs.ExpMap() #the function ends here. 
It does not run gtlike def CreateLikeObject(self): """Create an UnbinnedAnalysis or a BinnedAnalysis and retrun it.""" #create binnedAnalysis object if self.config['analysis']['likelihood'] == 'binned': use_edisp = self.config['analysis']['EnergyDispersion'] == 'yes' edisp_bins = -2 if use_edisp==True else 0 Obs = BinnedObs(srcMaps=self.obs.srcMap, expCube=self.obs.Cubename, binnedExpMap=self.obs.BinnedMapfile, irfs=self.obs.irfs) Cfg = BinnedConfig(use_edisp=use_edisp, edisp_bins=edisp_bins) Fit = BinnedAnalysis(Obs, self.obs.xmlfile, config=Cfg, optimizer=self.config['fitting']['optimizer']) Fit.setEnergyRange(self.obs.Emin,self.obs.Emax) #print(("Is edisp enabled? {0}".format(str(Fit.logLike.use_edisp())))) #create a unbinnedAnalysis object if self.config['analysis']['likelihood'] == 'unbinned': Obs = UnbinnedObs(self.obs.mktimefile, self.obs.ft2, expMap=self.obs.Mapname, expCube=self.obs.Cubename, irfs=self.obs.irfs) Fit = UnbinnedAnalysis(Obs, self.obs.xmlfile, optimizer=self.config['fitting']['optimizer']) # Fix this, EBL absorbed models use LogParabola with b=0 instead of PowerLaw, # we may want to allow fixed shape for that case if float(self.config['Spectrum']['FrozenSpectralIndex']!=0): parameters = dict() parameters['Index'] = -float(self.config['Spectrum']['FrozenSpectralIndex']) parameters['alpha'] = +float(self.config['Spectrum']['FrozenSpectralIndex']) parameters['Index1'] = -float(self.config['Spectrum']['FrozenSpectralIndex']) parameters['beta'] = 0 parameters['Index2'] = 2. parameters['Cutoff'] = 30000. # set the cutoff to be high for key in list(parameters.keys()): IdGamma = utils.getParamIndx(Fit, self.obs.srcname, key) if (IdGamma == -1): continue else: self.info("Freezing %s = %s" %(str(key),str(parameters[key]))) Fit[IdGamma] = parameters[key] # set the parameter Fit[IdGamma].setFree(False)#the variable index is frozen to compute the UL return Fit #return the BinnedAnalysis or UnbinnedAnalysis object. 
def GetOptObject(self,Fit,method): if method == 'MINUIT': optObject = pyLike.Minuit(Fit.logLike) elif method == 'NEWMINUIT': optObject = pyLike.NewMinuit(Fit.logLike) else: optFactory = pyLike.OptimizerFactory_instance() optObject = optFactory.create(str(method), Fit.logLike) return optObject def GetStatus(self,Fit,optObject): status = optObject.getRetCode() if isinstance(optObject, pyLike.Minuit) == 'MINUIT': edm = optObject.getDistance() quality = optObject.getQuality() if status==0 else 2 elif isinstance(optObject, pyLike.NewMinuit): edm = optObject.getDistance() check = np.bitwise_and(np.bitwise_not(156), status) quality = 3 if check==0 else 0 else: edm = optObject.getDistance() quality = 3 if status==0 else 0 loglike = optObject.stat().value() return(edm,quality,loglike) def _PerformFit(self, Fit, methods=["MINUIT"], covar=False): """Perform a fit with the selected methods and extract the edm, quality of fit, etc.""" for method in methods: try: optObject = self.GetOptObject(Fit,self.config['fitting']['optimizer']) self.log_like = Fit.fit(verbosity=0, covar=True, optimizer=method, optObject=optObject) edm,quality,loglike = self.GetStatus(Fit,optObject) if self.config['verbose'] == 'yes' : print(('Fit output with {1}: {0} [quality: {2}]'.format( self.log_like,self.config['fitting']['optimizer'],quality))) except Exception as exc: self.warning('Exception while running {0}, {1}'.format(method,str(exc))) else: break return(Fit) def PerformFit(self, Fit, writeXml = True): """Run gtlile tool. First it run gtlike with the DRNMGB optimizer and the user optimizer after. A dictionnay is return with all the releveant results""" self._log('gtlike', 'Run likelihood analysis') # TODO fix this part # Change the fit tolerance to the one given by the user Fit.ftol = float(self.config['fitting']['ftol']) # Fit with DRMNGB/DRMNFB (as recommended in gtlike fhelp) optimizer to # obtain initial parameters close to the minimum. Then switch optimizer. 
list_of_methods = ['DRMNFB','DRMNGB'] Fit = self._PerformFit(Fit, list_of_methods, covar=False) # 2nd precise fit with the user optimizer and ask gtlike to compute the covariance matrix if self.config['fitting']['optimizer'] in ["MINUIT","NEWMINUIT"]: list_of_methods = ["NEWMINUIT","MINUIT"] else: list_of_methods = [self.config['fitting']['optimizer']] Fit = self._PerformFit(Fit, list_of_methods, covar=True) # remove source with TS<min_source_TS (default=1) # to improve convergence and re-fit try: self.config['fitting']['min_source_TS'] except KeyError: self.config['fitting']['min_source_TS'] = 1. Fit = self.RemoveWeakSources(Fit, self.config['fitting']['min_source_TS']) self._log('Re-optimize', '') self._PerformFit(Fit, list_of_methods, covar=True) self.outXml = None if writeXml : self.outXml = utils._dump_xml(self.config) Fit.writeXml(self.outXml) self.success("Fit with gtlike performed") def RemoveWeakSources(self,Fit,minTS=1.0): """Remove weak sources and re-optimize to get a better minimum.""" self._log('','Remove all the weak (TS<%.2f) sources' %minTS) SrcTsDict = dict() for src in Fit.model.srcNames: SrcTsDict[src] = Fit.Ts(src) if (SrcTsDict[src]<minTS) and not(src == self.obs.srcname): for comp in Fit.components: if comp.logLike.getSource(src).getType() == 'Point': if self.config['verbose'] == 'yes' : self.info("deleting source "+src+" with TS = "+\ str(SrcTsDict[src])+" from the model") comp.deleteSource(src) for src in Fit.model.srcNames: if (SrcTsDict[src]>=minTS): if self.config['verbose'] == 'yes' : self.info('keeping source {0} with TS={1:.2e}'.format(src,SrcTsDict[src])) return Fit def GetAndPrintResults(self, Fit): """Get and print some useful results. 
Also contruct a dictonnary and fill it with results""" if self.config['verbose'] == 'yes' : self._log('Results', 'Print results of the fit') Result = {} if self.config['verbose'] == 'yes' : print((Fit.model,"\n")) self.info("Results for the Fit") # Print src name, Npred and TS for source with TS > 5 print("Source Name\tNpred\tTS") #TODO #for src in Fit.model.srcNames: #if Fit.Ts(src) > 5: # print src, "\t%2.3f\t%2.3f" % (Fit.NpredValue(src), Fit.Ts(src)) # fill the dictonnary with some values Result['Optimizer'] = self.config['fitting']['optimizer'] Result['Npred'] = Fit.NpredValue(self.obs.srcname) Result['TS'] = Fit.Ts(self.obs.srcname) if self.config['verbose'] == 'yes' : print(("Values and (MINOS) errors for " + self.obs.srcname)) print(("TS : ", Fit.Ts(self.obs.srcname))) # Get the python object 'Spectrum' for the source of interest spectrum = Fit[self.obs.srcname].funcs['Spectrum'] # Get the names of the parameters for the source of interest ParName = spectrum.paramNames #Get the model type and fill the dictonnary stype = Fit.model.srcs[self.obs.srcname].spectrum().genericName() Result['ModelType'] = stype Result['log_like'] = Fit.logLike.value() #Add the energy information to the result dictionnary Result['Emin'] = self.obs.Emin Result['Emax'] = self.obs.Emax #Add the time information to the result dictionnary Result['tmin'] = self.config['time']['tmin'] Result['tmax'] = self.config['time']['tmax'] Result['SrcName'] = self.obs.srcname Result['Flux'] = Fit.flux(self.obs.srcname,self.obs.Emin,self.obs.Emax) Result['dFlux'] = Fit.fluxError(self.obs.srcname,self.obs.Emin,self.obs.Emax) Result['EFlux'] = Fit.energyFlux(self.obs.srcname,self.obs.Emin,self.obs.Emax) Result['dEFlux'] = Fit.energyFluxError(self.obs.srcname,self.obs.Emin,self.obs.Emax) for par in ParName : #Loop over the parameters and get value, error and scale ParValue = spectrum.getParam(par).value() ParError = spectrum.getParam(par).error() Scale = spectrum.getParam(par).getScale() Result[par] = 
ParValue * Scale Result['d'+par] = ParError * Scale if ParError>0: # Compute MINOS errors for relevent parameters Fit.Ts(self.obs.srcname) > 5 and try: MinosErrors = Fit.minosError(self.obs.srcname, par) if self.config['verbose'] == 'yes' : print((par+" : %2.2f +/- %2.2f [ %2.2f, + %2.2f ] %2.0e" % (ParValue, ParError, MinosErrors[0], MinosErrors[1], Scale))) Result.update({'d'+par+'-': MinosErrors[0] * Scale}) Result.update({'d'+par+'+': MinosErrors[1] * Scale}) except: if self.config['verbose'] == 'yes' : print((par+" : %2.2f +/- %2.2f %2.0e" % (ParValue, ParError, Scale))) else: if self.config['verbose'] == 'yes' : print((par+" : %2.2f %2.0e" % (ParValue, Scale))) try: # get covariance matrix if self.config['verbose'] == 'yes' : utils.GetCovar(self.obs.srcname, Fit) except: pass #if the covariance matrix has not been computed if self.config['verbose'] == 'yes' : utils.GetFluxes(Fit,self.obs.Emin,self.obs.Emax) #print the flux of all the sources #Compute an UL if the source is too faint if float(self.config['UpperLimit']['TSlimit']) > Fit.Ts(self.obs.srcname): if self.config['UpperLimit']['envelope'] == 'yes': self.EnvelopeUL(Fit) else: Ulval = self.ComputeUL(Fit) Result['Ulvalue'] = Ulval return Result #Return the dictionnary def PoissonUL(self,Fit): """ Compute UL using Feldman-cousin poisson stat""" self.info('Compute the exposure')#run gtexposure try : spfile = fits.open(self.obs.lcfile) except: self.obs.GtLCbin(dt = self.config['time']['tmax']-self.config['time']['tmin']) #spfile = pyfits.open(self.obs.lcfile) try: self.obs.Configuration['AppLC']['index'] = self.config['UpperLimit']['SpectralIndex'] except: try: self.obs.Configuration['AppLC']['index'] except: self.info("Cannot find the spectral index") self.obs.Configuration['AppLC']['index'] = 1.5 self.info("Assuming spectral index of %s" %self.info("Assuming default index of 1.5")) self.obs.GtExposure() Exposure = np.sum(spfile[1].data.field("EXPOSURE")) ### Run it in any case (useful for instance for the 
DL3) #self.info('Compute the psf')#run gtpsf #self.obs.GtPSF() ccube = fits.open(self.obs.ccube) psfres = fits.open(self.obs.psf) #read psf and get the 68% containement radius theta = (psfres[2].data["Theta"]) theta68 = np.zeros(len(psfres[1].data["psf"])) for i in range(len(psfres[1].data["psf"])): integral = np.trapz(psfres[1].data["psf"][i],theta) for j in range(psfres[1].data["psf"][i].size): if np.trapz(psfres[1].data["psf"][i][:j],theta[:j])/integral>0.68: theta68[i] = theta[j] break #read the CCUBE x = np.arange(-abs(int(ccube[0].header["CRPIX1"])*ccube[0].header["CDELT1"]),\ abs(ccube[0].header["CRPIX1"]*ccube[0].header["CDELT1"]),abs(ccube[0].header["CDELT1"])) y = np.arange(-abs(ccube[0].header["CRPIX2"]*ccube[0].header["CDELT2"]),\ abs(int(ccube[0].header["CRPIX2"])*ccube[0].header["CDELT2"]),abs(ccube[0].header["CDELT2"])) xx, yy = np.meshgrid(x, y) dist = np.sqrt(xx**2 + yy**2) Obsevt = 0 #compute the number of events within the PSF radius for i in range(len(psfres[1].data["psf"])): maps = ccube[0].data[i] Obsevt += sum(maps[dist<theta68[i]])/0.68 nbg = max(0,int(Obsevt-Fit.NpredValue(self.obs.srcname))) Obsevt = int(Fit.NpredValue(self.obs.srcname)+nbg) if Obsevt> 20: self.warning("Observed Numbers of event too high (>20)\n abort and return -1") return -1 cl = str(int(float(self.config['UpperLimit']['cl'])*100)) try : ullookup = np.genfromtxt(environ.ENRICO_DIR+'/enrico/extern/UL_poisson_'+cl+'.dat',unpack=True) except: self.warning("cannot find the file "+environ.ENRICO_DIR+'/enrico/extern/UL_poisson_'+cl+'.dat') bkglookup = np.array([0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0]) measurement = ullookup[0] #the first row is the measurement uls = ullookup[2:-1:2] #keep only 1 row over 2 since we don't care about LL self.info("Found "+str(Obsevt)+" events for "+str(nbg)+" background event ") # print uls[bkglookup.searchsorted(nbg)][measurement.searchsorted(Obsevt)]," ",Exposure return 
uls[bkglookup.searchsorted(nbg)][measurement.searchsorted(Obsevt)]/Exposure def ComputeUL(self, Fit): """Compute an Upper Limit using either the profil or integral method See the ST cicerone for more information on the 2 method""" self._log('UpperLimit', 'Compute upper Limit') #Index given by the user self.info("Assumed index is "+str(self.config['UpperLimit']['SpectralIndex'])) parameters = dict() parameters['Index'] = -float(self.config['UpperLimit']['SpectralIndex']) parameters['alpha'] = +float(self.config['UpperLimit']['SpectralIndex']) parameters['Index1'] = -float(self.config['UpperLimit']['SpectralIndex']) parameters['beta'] = 0 parameters['Index2'] = 2. parameters['Cutoff'] = 30000. # set the cutoff to be high for key in list(parameters.keys()): try: utils.FreezeParams(Fit,self.obs.srcname, key, parameters[key]) except: continue import scipy.stats cl = float(self.config['UpperLimit']['cl']) delta = 0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1) if self.config['UpperLimit']['Method'] == "Profile": #The method is Profile if Fit.Ts(self.obs.srcname)<2 : self.warning("TS of the source is very low, better to use Integral method") try: import UpperLimits ulobject = UpperLimits.UpperLimits(Fit) ul, _ = ulobject[self.obs.srcname].compute(emin=self.obs.Emin, emax=self.obs.Emax,delta=delta) #delta=2.71 / 2) self.info("Upper limit using Profile method: ") #print ulobject[self.obs.srcname].results self.warning("Be sure to have enough photons to validate the gaussian assumption") except RuntimeError: self.warning("ST UpperLimits returned RuntimeError, trying Integral") self.config['UpperLimit']['Method'] = 'Integral' if self.config['UpperLimit']['Method'] == "Integral": #The method is Integral import IntegralUpperLimit try: ul, _ = IntegralUpperLimit.calc_int(Fit, self.obs.srcname, cl=cl, verbosity=0,emin=self.obs.Emin, emax=self.obs.Emax) except RuntimeError: self.warning("ST UpperLimits returned RuntimeError, trying Poisson") self.config['UpperLimit']['Method'] = 
'Poisson' print(("Upper limit using Integral method: ", ul)) self.warning("Be sure to have enough photons to validate the gaussian assumption") if self.config['UpperLimit']['Method'] == "Poisson": #The method is Poisson ul = self.PoissonUL(Fit) print(("Upper limit using Poisson statistic: ", ul)) print("This is an ul on the integral flux in ph/cm2/s") return ul #Return the result. This is an ul on the integral flux in ph/cm2/s def EnvelopeUL(self, Fit): """Compute the envelope UL. An UL is computed for different index and the maximum is taken at each energy. This is usefull when the source index is not know or can not be constrain by theoritical argument The index range form 1.5 to 2.5""" import IntegralUpperLimit import UpperLimits self._log('EnvelopeUL', 'Compute upper limit envelope') PhIndex = Fit.par_index(self.obs.srcname, 'Index') Nbp = 20 #Make Nbp computations Npgraph = 100#The graph has Npgraph points ener = np.logspace(np.log10(self.obs.Emin), np.log10(self.obs.Emax), Npgraph)#the array containing the energy Ulenv =
np.array(Npgraph * [0.])
numpy.array
import numpy as np import math from scipy.interpolate import RegularGridInterpolator, NearestNDInterpolator, LinearNDInterpolator class vdf(): def __init__(self, v_max, resolution, coord_sys): self.grid_cart = None self.grid_spher = None self.dvvv = None self.vdf_interp = np.zeros((resolution,resolution,resolution)) grid_cart, grid_spher, grid_cyl, dvvv = self.init_grid(v_max, resolution, coord_sys) self.grid_cart_t = self.grid_cart.copy() self.grid_spher_t = self.grid_spher.copy() self.nb_counts = np.zeros((resolution,resolution,resolution)) def interpolate_cart_vdf(self, grid, vdf0, interpolate='near'): if interpolate == 'near': method_str = 'nearest' elif interpolate == 'lin': method_str = 'linear' if interpolate in ['near', 'lin']: if vdf0.ndim==2: interpFunc = RegularGridInterpolator( (grid[0,:,0], grid[1,0,:]), vdf0, bounds_error=False, method=method_str, fill_value=np.nan) d = interpFunc(self.grid_cart[[0,2],:,:,0].reshape(2,-1).T) ## Ugly AF. d = d.reshape((self.vdf_interp.shape[0],self.vdf_interp.shape[0])) self.vdf_interp = d[:,:,None] elif vdf0.ndim==3: interpFunc = RegularGridInterpolator( (grid[0,:,0,0], grid[1,0,:,0], grid[2,0,0,:]), vdf0, bounds_error=False, method=method_str, fill_value=np.nan) d = interpFunc(self.grid_cart.reshape(3,-1).T) self.vdf_interp = d.T.reshape(self.vdf_interp.shape) ## (res,res,res) # # # elif interpolate=='cub': # d = np.zeros_like(gridCart[0]).flatten() # ip = tricubic.tricubic(list(distribPeriod), [distribPeriod.shape[0],distribPeriod.shape[1],distribPeriod.shape[2]]) # deltaSpeed = (np.log10(pSet.speed[iS,1]/normTh)-np.log10(pSet.speed[iS,0]/normTh)) # ds = pSet.speed[iS,1:]/normTh-pSet.speed[iS,:-1]/normTh # deltaTheta = pSet.theta[1]-pSet.theta[0] # deltaPhi = pSet.phi[iS,1]-pSet.phi[iS,0] # vMinSpeed = np.log10(pSet.speed[iS,0]/normTh) # vMinTheta = 0. # vMinPhi = 0. # # # gridS[0] = np.log10(gridS[0]) ## gridS here becomes an array of bin index, to which the coordinate belongs. 
# # gridS[0] = (gridS[0]-vMinSpeed)/deltaSpeed #+ 1.5 # bin = np.digitize(gridS[0], pSet.speed[iS]/normTh)-1 # gridS[0] = bin + (gridS[0]-pSet.speed[iS,bin]/normTh)/ds[bin] # gridS[1] = (gridS[1]-vMinTheta)/deltaTheta + .5 # gridS[2] = (gridS[2]-vMinPhi)/deltaPhi + .5 # for i, node in enumerate(gridS.reshape((3,-1)).T): # d[i] = ip.ip(list(node)) # # itkTmp = (d<0) # d = d.reshape((resFinal,resFinal,resFinal)) # d[gridS[0]<0] = np.nan ## "fill_value". Should also be done for values larger than, and not only smaller than. # # d[itkTmp] = 0 # # sys.exit() # #_________________ def interpolate_spher_vdf(self, grid, vdf0, interpolate='near', psp=False): speed = grid[0,:,0,0][::-1].copy() theta = grid[1,0,:,0].copy() phi = grid[2,0,0,:].copy() vdf0 = np.flip(vdf0, axis=(0)) if interpolate == 'near': interp_method = 'nearest' elif interpolate == 'lin': interp_method = 'linear' # itk = self.grid_spher[2]>np.pi # self.grid_spher[2,itk] -= 2.*np.pi if psp: phi -= 60.*np.pi/180. phi %= 2.*np.pi self.grid_spher_t[2] -= 60.*np.pi/180 self.grid_spher_t[2] %= 2.*np.pi # phiPeriod = np.zeros(18) # phiPeriod[1:-1] = phi # phiPeriod[0] = phi[-1]-2*np.pi # phiPeriod[-1] = phi[0]+2*np.pi # thetaPeriod = np.zeros(10) # thetaPeriod[1:-1] = theta # thetaPeriod[0] = theta[-1]-np.pi # thetaPeriod[-1] = theta[0]+np.pi # distribPeriod = np.zeros((32,10,18)) # distribPeriod[:,1:-1,1:-1] = vdf0 # distribPeriod[:,1:-1,0] = vdf0[:,:,-1] # distribPeriod[:,1:-1,-1] = vdf0[:,:,0] # distribPeriod[:,0] = np.nanmean(distribPeriod[:,1], axis=1)[:,None] # distribPeriod[:,9] = np.nanmean(distribPeriod[:,8], axis=1)[:,None] # itkR = ~np.isnan(speed) # interpFunc = RegularGridInterpolator( (speed, thetaPeriod, phiPeriod), # distribPeriod, # bounds_error=False, method=interp_method, # fill_value=np.nan) interpFunc = RegularGridInterpolator( (speed, theta, phi), vdf0, bounds_error=False, method=interp_method, fill_value=np.nan) d = interpFunc(self.grid_spher_t.reshape(3,-1).T) d = 
d.T.reshape(self.vdf_interp.shape) ## (res,res,res) d[np.isnan(d)] = 0. self.nb_counts += (~np.isnan(d)) self.vdf_interp += d def transform_grid(self, R=None, v=None, s=None): if R is not None: gc = self.grid_cart.copy() self.grid_cart_t = np.dot(R, gc.reshape(3,-1)).reshape(self.grid_cart.shape) if v is not None: self.grid_cart_t -= v[:,None,None,None] self.grid_spher_t = self.cart2spher(self.grid_cart_t) # if interpolate=='near': # interpFunc = RegularGridInterpolator( (speed, thetaPeriod, phiPeriod), # (distribPeriod), # bounds_error=False, method='nearest', # fill_value=np.nan) # d = interpFunc(self.grid_spher_t.reshape(3,-1).T) # d = d.T.reshape(self.vdf_interp.shape) ## (res,res,res) # d[np.isnan(d)] = 0. # self.vdf_interp += d # print(np.nanmin(d), np.nanmax(d)) # # # # elif interpolate=='lin': # interpFunc = RegularGridInterpolator( (speed, thetaPeriod, phiPeriod), # (distribPeriod), # bounds_error=False, method='linear', # fill_value=np.nan) # # interpFunc = RegularGridInterpolator( (speed, theta, phi), # # vdf0, # # bounds_error=False, method='linear', # # fill_value=np.nan) # d = interpFunc(self.grid_spher_t.reshape(3,-1).T) # d = d.T.reshape(self.vdf_interp.shape) ## (res,res,res) # d[np.isnan(d)] = 0. # self.vdf_interp += d # # elif interpolate=='cub': # d = np.zeros_like(gridCart[0]).flatten() # ip = tricubic.tricubic(list(distribPeriod), [distribPeriod.shape[0],distribPeriod.shape[1],distribPeriod.shape[2]]) # deltaSpeed = (np.log10(pSet.speed[iS,1]/normTh)-np.log10(pSet.speed[iS,0]/normTh)) # ds = pSet.speed[iS,1:]/normTh-pSet.speed[iS,:-1]/normTh # deltaTheta = pSet.theta[1]-pSet.theta[0] # deltaPhi = pSet.phi[iS,1]-pSet.phi[iS,0] # vMinSpeed = np.log10(pSet.speed[iS,0]/normTh) # vMinTheta = 0. # vMinPhi = 0. # # # gridS[0] = np.log10(gridS[0]) ## gridS here becomes an array of bin index, to which the coordinate belongs. 
# # gridS[0] = (gridS[0]-vMinSpeed)/deltaSpeed #+ 1.5 # bin = np.digitize(gridS[0], pSet.speed[iS]/normTh)-1 # gridS[0] = bin + (gridS[0]-pSet.speed[iS,bin]/normTh)/ds[bin] # gridS[1] = (gridS[1]-vMinTheta)/deltaTheta + .5 # gridS[2] = (gridS[2]-vMinPhi)/deltaPhi + .5 # for i, node in enumerate(gridS.reshape((3,-1)).T): # d[i] = ip.ip(list(node)) # # itkTmp = (d<0) # d = d.reshape((resFinal,resFinal,resFinal)) # d[gridS[0]<0] = np.nan ## "fill_value". Should also be done for values larger than, and not only smaller than. # # d[itkTmp] = 0 # # sys.exit() # if psp: # self.grid_spher_t[2] += 60.*np.pi/180 # self.grid_spher_t[2] %= 2.*np.pi # #_________________ def init_grid(v_max, resolution, grid_geom): """Here we define the bin edges and centers, depending on the chosen coordinate system.""" if grid_geom == 'cart': edgesX = np.linspace(-v_max, v_max, resolution + 1, dtype=np.float32) centersX = (edgesX[:-1] + edgesX[1:]) * .5 # 3 x res x res_phi x res/2 grid_cart = np.mgrid[-v_max:v_max:resolution*1j, -v_max:v_max:resolution*1j, -v_max:v_max:resolution*1j] grid_cart = grid_cart.astype(np.float32) grid_spher = cart2spher(grid_cart) grid_cyl = cart2cyl(grid_cart) dv = centersX[1]-centersX[0] dvvv = np.ones((resolution, resolution, resolution)) * dv ** 3 elif grid_geom == 'spher': edges_rho = np.linspace(0, v_max, resolution + 1, dtype=np.float32) edges_theta = np.linspace(0, np.pi, resolution + 1, dtype=np.float32) edges_phi = np.linspace(0, 2*np.pi, resolution + 1, dtype=np.float32) centers_rho = (edges_rho[:-1] + edges_rho[1:]) * .5 centers_theta = (edges_theta[:-1] + edges_theta[1:]) * .5 centers_phi = (edges_phi[:-1] + edges_phi[1:]) * .5 grid_spher = np.mgrid[centers_rho[0]:centers_rho[-1]:centers_rho.size*1j, centers_theta[0]:centers_theta[-1]:centers_theta.size*1j, centers_phi[0]:centers_phi[-1]:centers_phi.size*1j] grid_spher = grid_spher.astype(np.float32) grid_cart = spher2cart(grid_spher) grid_cyl = cart2cyl(grid_cart) d_rho = centers_rho[1]-centers_rho[0] 
d_theta = centers_theta[1]-centers_theta[0] d_phi = centers_phi[1]-centers_phi[0] dv = centers_rho[1]-centers_rho[0] dvvv = np.ones((resolution, resolution, resolution)) \ * centers_rho[:, None, None] * d_rho * d_theta * d_phi elif grid_geom == 'cyl': edges_rho = np.linspace(0, v_max, resolution+1, dtype=np.float32) edges_phi = np.linspace(0, 2*np.pi, resolution+1, dtype=np.float32) edges_z = np.linspace(-v_max, v_max, resolution+1, dtype=np.float32) centers_rho = (edges_rho[:-1]+edges_rho[1:])*.5 centers_phi = (edges_phi[:-1]+edges_phi[1:])*.5 centers_z = (edges_z[:-1]+edges_z[1:])*.5 grid_cyl = np.mgrid[centers_rho[0]:centers_rho[-1]:centers_rho.size*1j, centers_phi[0]:centers_phi[-1]:centers_phi.size*1j, centers_z[0]:centers_z[-1]:centers_z.size*1j] grid_cyl = grid_cyl.astype(np.float32) grid_cart = cyl2cart(grid_cyl) grid_spher = cart2spher(grid_cart) dRho = centers_rho[1]-centers_rho[0] dPhi = centers_phi[1]-centers_phi[0] dZ = centers_z[1]-centers_z[0] dvvv = np.ones((resolution, resolution, resolution)) \ * centers_rho[:, None, None]*dRho*dPhi*dZ return grid_cart, grid_spher, grid_cyl, dvvv def spher2cart(v_spher): """Coordinate system conversion """ v_cart = np.zeros_like(v_spher) v_cart[0] = v_spher[0] *
np.sin(v_spher[1])
numpy.sin
from itertools import product import os import numpy as np from openfermion.third_party.representability.constraints.spin_orbital_2pos_constraints import ( # pylint: disable=line-too-long tpdm_antisymmetry_constraint, tpdm_trace_constraint, _coord_generator, tpdm_to_opdm_mapping, opdm_to_ohdm_mapping, sz_constraint, na_constraint, nb_constraint, tpdm_to_thdm_mapping, tpdm_to_phdm_mapping, spin_orbital_linear_constraints) from openfermion.third_party.representability._dualbasis import \ DualBasisElement, DualBasis from openfermion.third_party.representability._namedtensor import Tensor from openfermion.third_party.representability._multitensor import MultiTensor from openfermion.config import DATA_DIRECTORY from openfermion.chem import MolecularData from openfermion.utils import map_two_pdm_to_two_hole_dm, \ map_two_pdm_to_particle_hole_dm def test_trace_constraint(): dbe = tpdm_trace_constraint(4, 10) assert dbe.primal_tensors_names == ['cckk'] * 4**2 assert dbe.primal_elements == [ (i, j, i, j) for i, j in product(range(4), repeat=2) ] assert np.isclose(dbe.constant_bias, 0) assert
np.isclose(dbe.dual_scalar, 10)
numpy.isclose
import numpy as np import numpy.linalg as npl from scipy.stats import t as t_dist def batch_make_design(img_dict, convolved_dict): matrix = {} object_list = ["bottle", "cat", "chair", "face", "house", "scissors", "scrambledpix", "shoe"] for key, img in img_dict.items(): time_course = img.shape[-1] matrix[key] = np.ones((time_course,(len(object_list)+3))) for i in range(matrix[key].shape[-1]-3): matrix[key][:,i] = convolved_dict[key[7:] +"-"+ object_list[i]] LD = np.linspace(-1,1,time_course) LD2 = LD**2 LD2 = LD2 - np.mean(LD2) matrix[key][:, -3] = LD matrix[key][:, -2] = LD2 return matrix def scale_design_mtx(X): """utility to scale the design matrix for display This scales the columns to their own range so we can see the variations across the column for all the columns, regardless of the scaling of the column. """ mi, ma = X.min(axis=0), X.max(axis=0) # Vector that is True for columns where values are not # all almost equal to each other col_neq = (ma - mi) > 1.e-8 Xs = np.ones_like(X) # Leave columns with same value throughout with 1s # Scale other columns to min, max in column mi = mi[col_neq] ma = ma[col_neq] Xs[:,col_neq] = (X[:,col_neq] - mi)/(ma - mi) return Xs def batch_scale_matrix(matrix_dict): result = {} for key, matrix in matrix_dict.items(): result[key] = scale_design_mtx(matrix) return result def batch_convert_2d(img_dict): result = {} for key, img in img_dict.items(): result[key] = np.reshape(img, (-1, img.shape[-1])) return result def batch_convert_2d_based(img_dict, shape_dict): result = {} for key, img in img_dict.items(): result[key] = np.reshape(img, (-1, shape_dict[key][-1])) return result def apply_mask(img_dict, mask_dict): result = {} for key, img in img_dict.items(): mask = (mask_dict[key] == 1) result[key] = img[mask] return result def t_stat(y, X, c): """ betas, t statistic and significance test given data, design matrix, contrast This is OLS estimation; we assume the errors to have independent and identical normal distributions around 
zero for each $i$ in $\e_i$ (i.i.d). """ # Make sure y, X, c are all arrays #y = np.asarray(y) #X = np.asarray(X) #c = c.T c = np.atleast_2d(c).T # As column vector # Calculate the parameters - b hat beta = npl.pinv(X).dot(y) # The fitted values - y hat fitted = X.dot(beta) # Residual error errors = y - fitted # Residual sum of squares RSS = (errors**2).sum(axis=0) # Degrees of freedom is the number of observations n minus the number # of independent regressors we have used. If all the regressor # columns in X are independent then the (matrix rank of X) == p # (where p the number of columns in X). If there is one column that # can be expressed as a linear sum of the other columns then # (matrix rank of X) will be p - 1 - and so on. df = X.shape[0] -
npl.matrix_rank(X)
numpy.linalg.matrix_rank
from __future__ import absolute_import, division, print_function from six.moves import range import os import h5py import numpy as np from xfel.euxfel.read_geom import read_geom from libtbx.phil import parse import six from libtbx.utils import Sorry import datetime from xfel.util.jungfrau import pad_stacked_format phil_scope = parse(""" unassembled_file = None .type = path .help = hdf5 file used to read in image data. geom_file = None .type = path .help = geometry file to be read in for detector (.geom). output_file = None .type = path .help = output file path detector_distance = None .type = float .help = Detector distance wavelength = None .type = float .help = If not provided, try to find wavelength in unassembled file. beam_file = None .type = path .help = Overrides wavelength. Reads the pulse IDs in the provided file \ to get a list of wavelengths for the master. include_spectra = False .type = bool .help = If true, 2D spectral data will be included in the master file, \ as read from the beam_file. energy_offset = None .type = float .help = If set, add this offset (in eV) to the energy axis in the \ spectra in the beam file and to the per-shot wavelength. mask_file = None .type = str .help = Path to file with external bad pixel mask. split_modules_into_asics = True .type = bool .help = Whether to split the 4x2 modules into indivdual asics \ accounting for borders and gaps. trusted_range = None .type = floats(size=2) .help = Set the trusted range raw = False .type = bool .help = Whether the data being analyzed is raw data from the JF16M or has \ been corrected and padded. 
unassembled_data_key = None .type = str .expert_level = 2 .help = Override hdf5 key name in unassembled file pedestal_file = None .type = str .help = path to Jungfrau pedestal file gain_file = None .type = str .help = path to Jungfrau gain file raw_file = None .type = str .help = path to Jungfrau raw file nexus_details { instrument_name = SwissFEL ARAMIS BEAMLINE ESB .type = str .help = Name of instrument instrument_short_name = ESB .type = str .help = short name for instrument, perhaps the acronym source_name = SwissFEL ARAMIS .type = str .help = Name of the neutron or x-ray storage ring/facility source_short_name = SwissFEL ARAMIS .type = str .help = short name for source, perhaps the acronym start_time = None .type = str .help = ISO 8601 time/date of the first data point collected in UTC, \ using the Z suffix to avoid confusion with local time end_time = None .type = str .help = ISO 8601 time/date of the last data point collected in UTC, \ using the Z suffix to avoid confusion with local time. \ This field should only be filled when the value is accurately \ observed. If the data collection aborts or otherwise prevents \ accurate recording of the end_time, this field should be omitted end_time_estimated = None .type = str .help = ISO 8601 time/date of the last data point collected in UTC, \ using the Z suffix to avoid confusion with local time. \ This field may be filled with a value estimated before an \ observed value is avilable. 
sample_name = None .type = str .help = Descriptive name of sample total_flux = None .type = float .help = flux incident on beam plane in photons per second } """) ''' This script creates a master nexus file by taking in as input a) an hdf5 file and b) a .geom file The hd5f file is generated by the JF16M after processing the raw images and doing appropriate gain corrections The assumed parameters for the detector can be seen in the __init__ function and should be changed if they are modified at in the future ''' class jf16m_cxigeom2nexus(object): def __init__(self, args): self.params_from_phil(args) if self.params.detector_distance == None: self.params.detector_distance = 100.0 # Set detector distance arbitrarily if nothing is provided self.hierarchy = read_geom(self.params.geom_file) self.n_quads = 4 self.n_modules = 8 def params_from_phil(self, args): user_phil = [] for arg in args: if os.path.isfile(arg): user_phil.append(parse(file_name=arg)) else: try: user_phil.append(parse(arg)) except Exception as e: raise Sorry("Unrecognized argument: %s"%arg) self.params = phil_scope.fetch(sources=user_phil).extract() def _create_scalar(self, handle,path,dtype,value): dataset = handle.create_dataset(path, (),dtype=dtype) dataset[()] = value def create_vector(self,handle, name, value, **attributes): handle.create_dataset(name, (1,), data = [value], dtype='f') for key,attribute in six.iteritems(attributes): handle[name].attrs[key] = attribute def create_nexus_master_file(self): ''' Hierarchical structure of master nexus file. 
Format information available here http://download.nexusformat.org/sphinx/classes/base_classes/NXdetector_module.html#nxdetector-module --> entry --> data --> definition (leaf) --> instrument --> sample ''' output_file_name = self.params.output_file if self.params.output_file is not None else os.path.splitext(self.params.unassembled_file)[0]+'_master.h5' f = h5py.File(output_file_name, 'w') f.attrs['NX_class'] = 'NXroot' f.attrs['file_name'] = os.path.basename(output_file_name) f.attrs['file_time'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") f.attrs['HDF5_Version'] = h5py.version.hdf5_version entry = f.create_group('entry') entry.attrs['NX_class'] = 'NXentry' if self.params.nexus_details.start_time: entry['start_time'] = self.params.nexus_details.start_time if self.params.nexus_details.end_time: entry['end_time'] = self.params.nexus_details.end_time if self.params.nexus_details.end_time_estimated: entry['end_time_estimated'] = self.params.nexus_details.end_time_estimated # --> definition self._create_scalar(entry, 'definition', 'S4', np.string_('NXmx')) # --> data data = entry.create_group('data') data.attrs['NX_class'] = 'NXdata' data_key = 'data' if self.params.unassembled_data_key: unassembled_data_key = self.params.unassembled_data_key else: if self.params.raw: unassembled_data_key = "data/JF07T32V01/data" else: unassembled_data_key = "data/data" data[data_key] = h5py.ExternalLink(self.params.unassembled_file, unassembled_data_key) if self.params.raw_file is not None: assert not self.params.raw with h5py.File(self.params.pedestal_file, "r") as pedh5: print("Padding raw pedestal data") mean_pedestal = [pad_stacked_format(raw) for raw in pedh5["gains"]] print("Padding raw pedestal RMS data") sigma_pedestal = [pad_stacked_format(raw) for raw in pedh5["gainsRMS"]] data.create_dataset("pedestal", data=mean_pedestal, dtype=np.float32) data.create_dataset('pedestalRMS', data=sigma_pedestal, dtype=np.float32) with h5py.File(self.params.gain_file, "r") as 
gainh5: print("Padding gains") gains = [pad_stacked_format(raw) for raw in gainh5["gains"]] data.create_dataset("gains", data=gains, dtype=np.float32) data.attrs['signal'] = 'data' raw_file_handle = h5py.File(self.params.raw_file, "r") res_file_handle = h5py.File(self.params.unassembled_file, "r") raw_dset = raw_file_handle["data/JF07T32V01/data"] raw_shape = raw_dset.shape _, raw_slowDim, raw_fastDim = raw_shape raw_type = raw_dset.dtype num_imgs = res_file_handle['data/data'].shape[0] raw_layout = h5py.VirtualLayout(shape=(num_imgs, raw_slowDim, raw_fastDim), dtype=raw_type) raw_pulses = raw_file_handle['data/JF07T32V01/pulse_id'][()][:, 0] assert np.all(raw_pulses == np.sort(raw_pulses)) # NOTE; this is quick, however I think this is always the case res_pulses = h5py.File(self.params.unassembled_file, 'r')['data/pulse_id'][()] raw_source = h5py.VirtualSource(self.params.raw_file, 'data/JF07T32V01/data', shape=raw_shape) for res_imgnum, raw_imgnum in enumerate(np.searchsorted(raw_pulses, res_pulses)): raw_layout[res_imgnum] = raw_source[raw_imgnum] data.create_virtual_dataset('raw', raw_layout) if self.params.raw: if self.params.pedestal_file: # named gains instead of pedestal in JF data files data['pedestal'] = h5py.ExternalLink(self.params.pedestal_file, 'gains') data['pedestalRMS'] = h5py.ExternalLink(self.params.pedestal_file, 'gainsRMS') if self.params.gain_file: data['gains'] = h5py.ExternalLink(self.params.gain_file, 'gains') if self.params.pedestal_file or self.params.gain_file: data.attrs['signal'] = 'data' #--> sample sample = entry.create_group('sample') sample.attrs['NX_class'] = 'NXsample' if self.params.nexus_details.sample_name: sample['name'] = self.params.nexus_details.sample_name sample['depends_on'] = '.' 
# This script does not support scans/gonios # --> source source = entry.create_group('source') source.attrs['NX_class'] = 'NXsource' source['name'] = self.params.nexus_details.source_name source['name'].attrs['short_name'] = self.params.nexus_details.source_short_name # --> instrument instrument = entry.create_group('instrument') instrument.attrs['NX_class'] = 'NXinstrument' instrument["name"] = self.params.nexus_details.instrument_name instrument["name"].attrs["short_name"] = self.params.nexus_details.instrument_short_name beam = instrument.create_group('beam') beam.attrs['NX_class'] = 'NXbeam' if self.params.nexus_details.total_flux: self._create_scalar(beam, 'total_flux', 'f', self.params.nexus_details.total_flux) beam['total_flux'].attrs['units'] = 'Hz' if self.params.wavelength is None and self.params.beam_file is None: wavelengths = h5py.File(self.params.unassembled_file, 'r')['instrument/photon_wavelength_A'] beam.create_dataset('incident_wavelength', (1,), data=np.mean(wavelengths), dtype='f8') elif self.params.beam_file is not None: # data file has pulse ids, need to match those to the beam file, which may have more pulses if self.params.raw: data_pulse_ids = h5py.File(self.params.unassembled_file, 'r')['data/JF07T32V01/pulse_id'][()] else: data_pulse_ids = h5py.File(self.params.unassembled_file, 'r')['data/pulse_id'][()] beam_h5 = h5py.File(self.params.beam_file, 'r') beam_pulse_ids = beam_h5['data/SARFE10-PSSS059:SPECTRUM_CENTER/pulse_id'][()] beam_energies = beam_h5['data/SARFE10-PSSS059:SPECTRUM_CENTER/data'][()] energies = np.ndarray((len(data_pulse_ids),), dtype='f8') if self.params.include_spectra: beam_spectra_x = beam_h5['data/SARFE10-PSSS059:SPECTRUM_X/data'][()] beam_spectra_y = beam_h5['data/SARFE10-PSSS059:SPECTRUM_Y/data'][()] spectra_x = np.ndarray((len(data_pulse_ids),beam_spectra_x.shape[1]), dtype='f8') spectra_y = np.ndarray((len(data_pulse_ids),beam_spectra_y.shape[1]), dtype='f8') for i, pulse_id in enumerate(data_pulse_ids): where =
np.where(beam_pulse_ids==pulse_id)
numpy.where
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from collections import Iterable as _Iterable from collections import Callable as _Callable from .op import * from .utils import * from .utils import _pair from .thirdparty._switch_norm import SwitchNorm1d, SwitchNorm2d from .thirdparty import _sn_layers from .thirdparty._oct_conv import OctaveConv as _OctaveConv ''' 注意写 torch.jit.script 时需要手动添加非 Tensor 参数的注释 ''' Identity = nn.Identity # 下面已过时 # class Identity(torch.jit.ScriptModule): # ''' # torch 居然没有 identity 层 # ''' # def __init__(self): # super().__init__() # # @torch.jit.script_method # def forward(self, x): # return x class Upsample(torch.jit.ScriptModule): __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name'] def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None): super().__init__() # scale_factor 不允许是整数,有点坑。。 if isinstance(scale_factor, _Iterable): scale_factor = tuple([float(i) for i in scale_factor]) else: scale_factor = float(scale_factor) self.size = size self.scale_factor = scale_factor self.mode = mode self.align_corners = align_corners @torch.jit.script_method def forward(self, x): return F.interpolate(x, self.size, self.scale_factor, self.mode, self.align_corners) class UpsamplingConcat(torch.jit.ScriptModule): __constants__ = ['method', 'align_corners'] def __init__(self, method='bilinear', align_corners=False): super().__init__() self.method = method self.align_corners = align_corners @torch.jit.script_method def forward(self, x, shortpoint): shape = shortpoint.shape x = F.interpolate(x, (shape[2], shape[3]), mode=self.method, align_corners=self.align_corners) x = torch.cat((x, shortpoint), 1) return x class Dense(torch.jit.ScriptModule): def __init__(self, in_feat, out_feat, act=None, bias=True, *, norm_layer_kwargs={}): super().__init__() layers = [] den = nn.Linear(in_feat, out_feat, bias=bias is True) layers.append(den) if isinstance(bias, _Callable): 
layers.append(bias(out_feat, **norm_layer_kwargs)) if act: layers.append(act) self.layers = nn.Sequential(*layers) @torch.jit.script_method def forward(self, inputs): outputs = self.layers(inputs) return outputs class Dense_SN(nn.Module): def __init__(self, in_feat, out_feat, act=None, bias=True, num_itrs=1, *, norm_layer_kwargs={}): super().__init__() layers = [] den = nn.Linear(in_feat, out_feat, bias=bias is True) den = nn.utils.spectral_norm(den, n_power_iterations=num_itrs) layers.append(den) if isinstance(bias, _Callable): layers.append(bias(out_feat, **norm_layer_kwargs)) if act: layers.append(act) self.layers = nn.Sequential(*layers) def forward(self, inputs): outputs = self.layers(inputs) return outputs class _base_conv_setting(torch.jit.ScriptModule): # 用于初始化和保存卷积设置 def __init__(self, in_ch, out_ch, ker_sz, stride, pad, act, bias, dila): super().__init__() self.in_ch = in_ch self.out_ch = out_ch self.ker_sz = ker_sz self.stride = stride self.act = act self.bias = bias self.dila = dila self.pad = get_padding_by_name(ker_sz, pad) class Conv2D_SN(_base_conv_setting): def __init__(self, in_ch, out_ch, ker_sz=3, stride=1, pad='same', act=None, bias=True, groups=1, dila=1, num_itrs=1, *, use_fixup_init=False, norm_kwargs={}): super().__init__(in_ch, out_ch, ker_sz, stride, pad, act, bias, dila) layers = [] conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=ker_sz, stride=stride, padding=self.pad, dilation=dila, groups=groups, bias=bias is True) conv = nn.utils.spectral_norm(conv, n_power_iterations=num_itrs) if use_fixup_init: fixup_init(conv.weight, _pair(ker_sz), out_ch) if not bias: with torch.no_grad(): conv.bias.zero_() layers.append(conv) if isinstance(bias, _Callable): layers.append(bias(out_ch, **norm_kwargs)) if act: layers.append(act) self.layers = nn.Sequential(*layers) def forward(self, inputs): outputs = self.layers(inputs) return outputs class Conv2D(_base_conv_setting): def __init__(self, in_ch, out_ch, ker_sz=3, stride=1, 
pad='same', act=None, bias=True, groups=1, dila=1, *, use_fixup_init=False, norm_kwargs={}): super().__init__(in_ch, out_ch, ker_sz, stride, pad, act, bias, dila) layers = [] conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=ker_sz, stride=stride, padding=self.pad, dilation=dila, groups=groups, bias=bias is True) if use_fixup_init: fixup_init(conv.weight, _pair(ker_sz), out_ch) if bias is True: with torch.no_grad(): conv.bias.zero_() layers.append(conv) if isinstance(bias, _Callable): layers.append(bias(out_ch, **norm_kwargs)) if act: layers.append(act) self.layers = nn.Sequential(*layers) @torch.jit.script_method def forward(self, inputs): outputs = self.layers(inputs) return outputs class DeConv2D(_base_conv_setting): def __init__(self, in_ch, out_ch, ker_sz=3, stride=1, pad='same', act=None, bias=True, groups=1, dila=1, *, use_fixup_init=False, norm_kwargs={}): super().__init__(in_ch, out_ch, ker_sz, stride, pad, act, bias, dila) layers = [] conv = nn.ConvTranspose2d(in_channels=in_ch, out_channels=out_ch, kernel_size=ker_sz, stride=stride, padding=self.pad, output_padding=stride-1, dilation=dila, groups=groups, bias=bias is True) if use_fixup_init: fixup_init(conv.weight, _pair(ker_sz), out_ch) if bias is True: with torch.no_grad(): conv.bias.zero_() layers.append(conv) if isinstance(bias, _Callable): layers.append(bias(out_ch, **norm_kwargs)) if act: layers.append(act) self.layers = nn.Sequential(*layers) @torch.jit.script_method def forward(self, inputs): outputs = self.layers(inputs) return outputs class DwConv2D(_base_conv_setting): def __init__(self, in_ch, depth_multiplier=1, ker_sz=3, stride=1, pad='same', act=None, bias=True, dila=1, *, use_fixup_init=False, norm_kwargs={}): out_ch = in_ch * depth_multiplier super().__init__(in_ch, out_ch, ker_sz, stride, pad, act, bias, dila) layers = [] conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=ker_sz, stride=stride, padding=self.pad, dilation=dila, groups=in_ch, bias=bias 
is True) if use_fixup_init: fixup_init(conv.weight, _pair(ker_sz), out_ch) if bias is True: with torch.no_grad(): conv.bias.zero_() layers.append(conv) if isinstance(bias, _Callable): layers.append(bias(out_ch, **norm_kwargs)) if act: layers.append(act) self.layers = nn.Sequential(*layers) @torch.jit.script_method def forward(self, inputs): outputs = self.layers(inputs) return outputs class OctConv2D(_base_conv_setting): def __init__(self, in_ch, out_ch, ker_sz=3, stride=1, pad='same', act=None, bias=True, groups=1, dila=1, alpha=(0.5, 0.5), *, use_fixup_init=False, norm_kwargs={}): super().__init__(in_ch, out_ch, ker_sz, stride, pad, act, bias, dila) self.act = act if act is None: self.act = Identity() # 限定输入和输出必定存在高频特征 assert 0. <=
np.min(alpha)
numpy.min
import os import numpy as np import pytest from jina.flow import Flow from jina.proto import jina_pb2 from jina.types.ndarray.generic import NdArray from tests import validate_callback NUM_DOCS = 100 cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture def multimodal_documents(): docs = [] for idx in range(0, NUM_DOCS): """ doc - idx | | - chunk - blob [idx, idx] - modality1 -> The dummy encoder will pass the blob to embedding | - chunk - blob [idx, idx, idx] - modality2 -> The dummy encoder will pass the blob to embedding Result: doc - idx - embedding [idx, idx, idx, idx, idx] """ doc = jina_pb2.DocumentProto() doc.text = f'{idx}' for modality in ['modality1', 'modality2']: chunk = doc.chunks.add() chunk.modality = modality if modality == 'modality1': NdArray(chunk.blob).value = np.array([idx, idx]) else: NdArray(chunk.blob).value = np.array([idx, idx, idx]) docs.append(doc) return docs # TODO(Deepankar): Gets stuck when `restful: True` - issues with `needs='gateway'` @pytest.mark.parametrize('restful', [False]) def test_multimodal_embedding_parallel(multimodal_documents, mocker, monkeypatch, restful): monkeypatch.setenv("RESTFUL", restful) def validate_response(resp): assert len(resp.index.docs) == NUM_DOCS for idx, doc in enumerate(resp.index.docs): np.testing.assert_almost_equal(NdArray(doc.embedding).value,
np.array([idx, idx, idx, idx, idx])
numpy.array
import tables import numpy as np np.random.seed(0) class IODataPoint(tables.IsDescription): x = tables.Int64Col(3) y = tables.Int64Col(3) def generateData(n,current_y,table): ptr = table.row input_indexes = np.random.poisson(50, (int(2.1*10**(n-2)),3) ) for i in range(1,input_indexes.shape[0]): input_indexes[i,:] = input_indexes[i,:]+input_indexes[i-1,:] j=
np.array([0,0,0])
numpy.array
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests dynamic programming code. """ from absl.testing import absltest import numpy as np from autoregressive_diffusion.utils import dynamic_programming class DPTest(absltest.TestCase): """Tests categorical diffusion class.""" def test_compute_fixed_budget(self): """Tests for a specific KL if the program computes cost correctly.""" kl_per_t =
np.array([5., 4., 3., 2.85, 2.80])
numpy.array
"""PyCMDS.""" # --- import -------------------------------------------------------------------------------------- import numpy as np import tidy_headers from ._data import Data from .. import kit as wt_kit # --- define -------------------------------------------------------------------------------------- __all__ = ["from_PyCMDS"] # --- from function ------------------------------------------------------------------------------- def from_PyCMDS(filepath, name=None, parent=None, verbose=True): """Create a data object from a single PyCMDS output file. Parameters ---------- filepath : str The file to load. Can accept .data, .fit, or .shots files. name : str or None (optional) The name to be applied to the new data object. If None, name is read from file. parent : WrightTools.Collection (optional) Collection to place new data object within. Default is None. verbose : bool (optional) Toggle talkback. Default is True. Returns ------- data A Data instance. """ # header headers = tidy_headers.read(filepath) # name if name is None: # name not given in method arguments data_name = headers["data name"] else: data_name = name if data_name == "": # name not given in PyCMDS data_name = headers["data origin"] # create data object kwargs = { "name": data_name, "kind": "PyCMDS", "source": filepath, "created": headers["file created"], } if parent is not None: data = parent.create_data(**kwargs) else: data = Data(**kwargs) # array arr = np.genfromtxt(filepath).T # get axes and scanned variables axes = [] for name, identity, units in zip( headers["axis names"], headers["axis identities"], headers["axis units"] ): # points and centers points = np.array(headers[name + " points"]) if name + " centers" in headers.keys(): centers = headers[name + " centers"] else: centers = None # create axis = { "points": points, "units": units, "name": name, "identity": identity, "centers": centers, } axes.append(axis) shape = tuple([a["points"].size for a in axes]) for i, ax in enumerate(axes): sh = [1] 
* len(shape) sh[i] = len(ax["points"]) data.create_variable( name=ax["name"] + "_points", values=np.array(ax["points"]).reshape(sh) ) if ax["centers"] is not None: sh = [1] * len(shape) sh[i - 1] = len(axes[i - 1]["points"]) data.create_variable( name=ax["name"] + "_centers", values=
np.array(ax["centers"])
numpy.array
from tkinter import * from tkinter import ttk import tkinter.filedialog as filedialog from tkinter import messagebox from PIL import Image,ImageDraw,ImageFont from PIL import ImageTk,ImageGrab import cv2 from skimage import filters #import rasterio import matplotlib.pyplot as pyplt #from matplotlib.figure import Figure import numpy as np import os #import time import csv import scipy.linalg as la from functools import partial #import threading #import sys #import kplus from sklearn.cluster import KMeans import tkintercorestat #import tkintercorestat_plot import tkintercore import cal_kernelsize #import histograms #import createBins import axistest #from multiprocessing import Pool import lm_method #import batchprocess import sel_area class img(): def __init__(self,size,bands): self.size=size self.bands=bands import batchprocess displayimg={'Origin':None, 'PCs':None, 'Color Deviation':None, 'ColorIndices':None, 'Output':None} previewimg={'Color Deviation':None, 'ColorIndices':None} #cluster=['LabOstu','NDI'] #,'Greenness','VEG','CIVE','MExG','NDVI','NGRDI','HEIGHT'] #cluster=['LabOstu','NDI','Greenness','VEG','CIVE','MExG','NDVI','NGRDI','HEIGHT','Band1','Band2','Band3'] cluster=['PAT_R','PAT_G','PAT_B', 'DIF_R','DIF_G','DIF_B', 'ROO_R','ROO_G','ROO_B', 'GLD_R','GLD_G','GLD_B', 'Band1','Band2','Band3'] colorbandtable=np.array([[255,0,0],[255,127,0],[255,255,0],[127,255,0],[0,255,255],[0,127,255],[0,0,255],[127,0,255],[75,0,130],[255,0,255]],'uint8') #print('colortableshape',colortable.shape) filenames=[] Multiimage={} Multigray={} Multitype={} Multiimagebands={} Multigraybands={} workbandarray={} displaybandarray={} originbandarray={} colorindicearray={} clusterdisplay={} kernersizes={} multi_results={} outputimgdict={} outputimgbands={} outputsegbands={} originsegbands={} oldpcachoice=[] multiselectitems=[] coinbox_list=[] pre_checkbox=[] originpcabands={} batch={'PCweight':[], 'PCsel':[], 'Kmeans':[], 'Kmeans_sel':[], 'Area_max':[], 'Area_min':[], 'shape_max':[], 
'shape_min':[], 'nonzero':[]} root=Tk() root.title('GridFree v.1.1.0 ') root.geometry("") root.option_add('*tearoff',False) emptymenu=Menu(root) root.config(menu=emptymenu) screenheight=root.winfo_screenheight() screenwidth=root.winfo_screenwidth() print('screenheight',screenheight,'screenwidth',screenwidth) screenstd=min(screenheight-100,screenwidth-100,850) coinsize=StringVar() selarea=StringVar() refvar=StringVar() imgtypevar=StringVar() edge=StringVar() kmeans=IntVar() pc_combine_up=DoubleVar() pc_combine_down=IntVar() filedropvar=StringVar() displaybut_var=StringVar() buttonvar=IntVar() bandchoice={} checkboxdict={} #minipixelareaclass=0 coinbox=None currentfilename='' currentlabels=None displaylabels=None workingimg=None displaypclabels=None boundaryarea=None outputbutton=None font=None reseglabels=None coindict=None ## Funcitons refarea=None originlabels=None originlabeldict=None changekmeans=False convband=None reflabel=0 minflash=[] dotflash=[] labelplotmap={} mappath='' elesize=[] labellist=[] figdotlist={} havecolorstrip=True kmeanschanged=False pcweightchanged=False originbinaryimg=None clusterchanged=False originselarea=False zoomoff=False maxx=0 minx=0 bins=None loccanvas=None linelocs=[0,0,0,0] maxy=0 miny=0 segmentratio=0 zoombox=[] displayfea_l=0 displayfea_w=0 resizeshape=[] previewshape=[] pcbuttons=[] pcbuttonsgroup=[] def distance(p1,p2): return np.sum((p1-p2)**2) def findratio(originsize,objectsize): oria=originsize[0] orib=originsize[1] obja=objectsize[0] objb=objectsize[1] if oria>obja or orib>objb: ratio=round(max((oria/obja),(orib/objb))) else: ratio=round(min((obja/oria),(objb/orib))) # if oria*orib>850 * 850: if oria*orib>screenstd * screenstd: if ratio<2: ratio=2 return ratio def getkeys(dict): return [*dict] def deletezoom(event,widget): print('leave widget') if len(zoombox)>0: for i in range(len(zoombox)): #print('delete') widget.delete(zoombox.pop(0)) widget.update() def zoom(event,widget,img): global zoombox x=event.x y=event.y 
#print(x,y) if len(zoombox)>1: widget.delete(zoombox.pop(0)) #print('delete') crop=img.crop((x-15,y-15,x+15,y+15)) w,h=crop.size #print(w,h) crop=crop.resize([w*3,h*3],resample=Image.BILINEAR) w,h=crop.size crop=ImageTk.PhotoImage(crop) zoombox.append(widget.create_image(x+5,y-5,image=crop)) root.update_idletasks() raise NameError #time.sleep(0.1) def changedisplay_pc(frame): for widget in frame.winfo_children(): widget.pack_forget() #widget.configure(image=displayimg[text]) #widget.image=displayimg[text] #widget.pack() w=displayimg['PCs']['Size'][1] l=displayimg['PCs']['Size'][0] widget.config(width=w,height=l) widget.create_image(0,0,image=displayimg['PCs']['Image'],anchor=NW) widget.pack() widget.update() def pcweightupdate(displayframe): getPCs() changedisplay_pc(displayframe) def buttonpress(val,displayframe,buttonframe): global buttonvar,pc_combine_up,kmeans buttonvar.set(val) kmeans.set(1) pc_combine_up.set(0.5) buttonchildren=buttonframe.winfo_children() for child in buttonchildren: child.config(highlightbackground='white') print(buttonchildren[val]) buttonchild=buttonchildren[val] buttonchild.config(highlightbackground='red') print('press button ',buttonvar.get()) getPCs() changedisplay_pc(displayframe) # if kmeans.get()>1: changekmeansbar('') beforecluster('') # changecluster('') def PCbuttons(frame,displayframe): #display pc buttons # buttonvar=IntVar() #buttonvar.set(0) for widget in frame.winfo_children(): widget.pack_forget() buttonframe=LabelFrame(frame) buttonframe.pack() for i in range(len(pcbuttons)): butimg=pcbuttons[i] but=Button(buttonframe,text='',image=butimg,compound=TOP,command=partial(buttonpress,i,displayframe,buttonframe)) if i==buttonvar.get(): but.config(highlightbackground='red') row=int(i/3) col=i%3 # print(row,col) but.grid(row=int(i/3),column=col) print('default button',buttonvar.get()) # change cluster,display def displaypreview(text): global figcanvas,resviewframe for widget in resviewframe.winfo_children(): widget.pack_forget() 
# previewframe=Canvas(frame,width=450,height=400,bg='white') figcanvas.pack() figcanvas.delete(ALL) if text=='Color Deviation': previewtext='ColorIndices' if text=='ColorIndices': previewtext='Color Deviation' previewimage=previewimg[previewtext]['Image'] figcanvas.create_image(0,0,image=previewimage,anchor=NW) figcanvas.update() def switchevent(event,widget,img): global zoomoff,zoomfnid_m,zoomfnid_l,zoombox zoomoff= not zoomoff if zoomoff==True: widget.unbind('<Motion>',zoomfnid_m) widget.unbind('<Leave>',zoomfnid_l) if len(zoombox)>0: for i in range(len(zoombox)): widget.delete(zoombox.pop(0)) widget.update() else: zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,img)) zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg)) def changedisplayimg(frame,text): global displaybut_var,figcanvas,resviewframe,reflabel displaybut_var.set(disbuttonoption[text]) for widget in frame.winfo_children(): widget.pack_forget() #widget.configure(image=displayimg[text]) #widget.image=displayimg[text] #widget.pack() w=displayimg[text]['Size'][1] l=displayimg[text]['Size'][0] widget.config(width=w,height=l) widget.create_image(0,0,image=displayimg[text]['Image'],anchor=NW) widget.pack() widget.update() global rects,selareapos,app,delapp,delrects,delselarea,originselarea global zoomfnid_m,zoomfnid_l app=sel_area.Application(widget) # delapp=sel_area.Application(widget) if text=='Output': try: image=outputsegbands[currentfilename]['iter0'] displayfig() except: return zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,image)) zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg)) delrects=app.start(zoomfnid_m,zoomfnid_l) widget.bind('<Double-Button-1>',lambda event,arg=widget:switchevent(event,arg,image)) print('delrects',delrects) else: reflabel=0 print('reflabel=',reflabel) try: delelareadim=app.getinfo(delrects[1]) if delelareadim!=[]: delselarea=delelareadim app.end() except: pass if 
text=='Origin': try: image=originsegbands['Origin'] zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,image)) zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg)) except: return widget.bind('<Double-Button-1>',lambda event,arg=widget:switchevent(event,arg,image)) for widget in resviewframe.winfo_children(): widget.pack_forget() rects=app.start() print(rects) originselarea=True else: widget.unbind('<Motion>') selareadim=app.getinfo(rects[1]) if selareadim!=[]: selareapos=selareadim app.end(rects) if text=='PCs': selareadim=app.getinfo(rects[1]) if selareadim!=[0,0,1,1] and selareadim!=[] and selareadim!=selareapos: selareapos=selareadim if selareapos!=[0,0,1,1] and originselarea==True: #need to redo PCA npfilter=np.zeros((displayimg['Origin']['Size'][0],displayimg['Origin']['Size'][1])) filter=Image.fromarray(npfilter) draw=ImageDraw.Draw(filter) draw.ellipse(selareapos,fill='red') filter=np.array(filter) filter=np.divide(filter,np.max(filter)) filter=cv2.resize(filter,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR) partialsingleband(filter) originselarea=False pass PCbuttons(resviewframe,frame) pass if text=='Color Deviation': #displaypreview displaypreview(text) pass if text=='ColorIndices': #displaypreview displaypreview(text) pass #print('change to '+text) #time.sleep(1) def updateresizeshape(shape,content): shape.append(int(content)) return shape def generatedisplayimg(filename): # init display images global resizeshape,previewshape try: # firstimg=Multiimagebands[filename] #height,width=firstimg.size # height,width,c=displaybandarray[filename]['LabOstu'].shape bandsize=Multiimagebands[filename].size if bandsize[0]*bandsize[1]>2000*2000: ratio=findratio([bandsize[0],bandsize[1]],[2000,2000]) else: ratio=1 height,width=bandsize[0]/ratio,bandsize[1]/ratio # ratio=findratio([height,width],[850,850]) ratio=findratio([height,width],[screenstd,screenstd]) print('displayimg ratio',ratio) resizeshape=[] # if 
height*width<850*850: if height*width<screenstd*screenstd: #resize=cv2.resize(Multiimage[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR) updateresizeshape(resizeshape,width*ratio) updateresizeshape(resizeshape,height*ratio) # resizeshape.append(width*ratio) # resizeshape.append(height*ratio) if height>screenstd: resizeshape=[] ratio=round(height/screenstd) updateresizeshape(resizeshape,width*ratio) updateresizeshape(resizeshape,height*ratio) if width>screenstd: resizeshape=[] ratio=round(width/screenstd) updateresizeshape(resizeshape,width*ratio) updateresizeshape(resizeshape,height*ratio) else: #resize=cv2.resize(Multiimage[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR) updateresizeshape(resizeshape,width/ratio) updateresizeshape(resizeshape,height/ratio) ratio=findratio([height,width],[400,450]) previewshape=[] if height*width<450*400: #resize=cv2.resize(Multiimage[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR) updateresizeshape(previewshape,width*ratio) updateresizeshape(previewshape,height*ratio) if height>400: previewshape=[] ratio=round(height/screenstd) updateresizeshape(previewshape,width/ratio) updateresizeshape(previewshape,height/ratio) if width>450: previewshape=[] ratio=round(width/screenstd) updateresizeshape(previewshape,width/ratio) updateresizeshape(previewshape,height/ratio) else: #resize=cv2.resize(Multiimage[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR) updateresizeshape(previewshape,width/ratio) updateresizeshape(previewshape,height/ratio) resize=cv2.resize(Multiimage[filename],(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR) originimg=Image.fromarray(resize.astype('uint8')) originsegbands.update({'Origin':originimg}) rgbimg=Image.fromarray(resize.astype('uint8')) draw=ImageDraw.Draw(rgbimg) suggsize=14 font=ImageFont.truetype('cmb10.ttf',size=suggsize) content='\n File: '+filename 
draw.text((10-1, 10+1), text=content, font=font, fill='white') draw.text((10+1, 10+1), text=content, font=font, fill='white') draw.text((10-1, 10-1), text=content, font=font, fill='white') draw.text((10+1, 10-1), text=content, font=font, fill='white') #draw.text((10,10),text=content,font=font,fill=(141,2,31,0)) draw.text((10,10),text=content,font=font,fill='black') rgbimg=ImageTk.PhotoImage(rgbimg) tempdict={} tempdict.update({'Size':resize.shape}) tempdict.update({'Image':rgbimg}) except: tempdict={} tempimg=np.zeros((screenstd,screenstd)) tempdict.update({'Size':tempimg.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))}) displayimg['Origin']=tempdict #if height*width<850*850: # resize=cv2.resize(Multigray[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR) #else: #resize=cv2.resize(Multigray[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR) tempimg=np.zeros((screenstd,screenstd)) tempdict={} try: tempdict.update({'Size':resize.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))}) except: tempdict.update({'Size':tempimg.shape}) #if height*width<850*850: # tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(height*ratio),int(width*ratio))).astype('uint8')))}) #else: # tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(height/ratio),int(width/ratio))).astype('uint8')))}) # tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))}) displayimg['Output']=tempdict tempdict={} try: tempdict.update({'Size':resize.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))}) except: 
tempdict.update({'Size':tempimg.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))}) displayimg['PCs']=tempdict tempdict={} temppreviewdict={} temppreviewimg=np.zeros((450,400)) try: tempband=np.zeros((displaybandarray[filename]['LabOstu'][:,:,0].shape)) # tempband=tempband+displaybandarray[filename]['LabOstu'] # ratio=findratio([tempband.shape[0],tempband.shape[1]],[850,850]) #if tempband.shape[0]*tempband.shape[1]<850*850: # tempband=cv2.resize(ratio,(int(tempband.shape[1]*ratio),int(tempband.shape[0]*ratio)),interpolation=cv2.INTER_LINEAR) #else: # tempband=cv2.resize(ratio,(int(tempband.shape[1]/ratio),int(tempband.shape[0]/ratio)),interpolation=cv2.INTER_LINEAR) tempband=cv2.resize(tempband,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR) tempdict.update({'Size':tempband.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempband[:,:,2].astype('uint8')))}) temppreview=cv2.resize(tempband,(int(previewshape[0]),int(previewshape[1])),interpolation=cv2.INTER_LINEAR) temppreview=Image.fromarray(temppreview.astype('uint8')) temppreviewdict.update({'Size':previewshape}) temppreviewdict.update({'Image':ImageTk.PhotoImage(temppreview)}) # print('resizeshape',resizeshape) #pyplt.imsave('displayimg.png',tempband[:,:,0]) #indimg=cv2.imread('displayimg.png') except: tempdict.update({'Size':tempimg.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))}) temppreviewdict.update({'Size':temppreviewimg.shape}) temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreviewimg.astype('uint8')))}) displayimg['ColorIndices']=tempdict previewimg['ColorIndices']=temppreviewdict #resize=cv2.resize(Multigray[filename],(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR) #grayimg=ImageTk.PhotoImage(Image.fromarray(resize.astype('uint8'))) #tempdict={} #tempdict.update({'Size':resize.shape}) #tempdict.update({'Image':grayimg}) 
tempdict={} temppreviewdict={} try: colordeviate=np.zeros((tempband[:,:,0].shape[0],tempband[:,:,0].shape[1],3),'uint8') kvar=int(kmeans.get()) for i in range(kvar): locs=np.where(tempband[:,:,0]==i) colordeviate[locs]=colorbandtable[i,:] # pyplt.imsave('colordeviation.png',colordeviate) # # colordevimg=Image.fromarray(colordeviate.astype('uint8')) # # colordevimg.save('colordeviation.png',"PNG") # testcolor=Image.open('colordeviation.png') print('colordeviation.png') # colortempdict={} colordeviate=cv2.resize(colordeviate,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR) tempdict.update({'Size':colordeviate.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(colordeviate.astype('uint8')))}) # colortempdict.update({'Size':colordeviate.shape}) # colortempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(colordeviate.astype('uint8')))}) # colortempdict.update({'Image':ImageTk.PhotoImage(testcolor)}) # tempdict={} temppreview=cv2.resize(colordeviate,(int(previewshape[0]),int(previewshape[1])),interpolation=cv2.INTER_LINEAR) temppreviewdict.update({'Size':temppreview.shape}) temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreview[:,:,0].astype('uint8')))}) except: tempdict.update({'Size':tempimg.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))}) temppreviewdict.update({'Size':temppreviewimg.shape}) temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreviewimg.astype('uint8')))}) # displayimg['Color Deviation']=colortempdict displayimg['Color Deviation']=tempdict previewimg['Color Deviation']=temppreviewdict def Open_File(filename): #add to multi-image,multi-gray #call band calculation global Multiimage,Multigray,Multitype,Multiimagebands,Multigraybands,filenames try: Filersc=cv2.imread(filename,flags=cv2.IMREAD_ANYCOLOR) ndim=np.ndim(Filersc) if ndim==2: height,width=np.shape(Filersc) channel=1 Filersc.reshape((height,width,channel)) 
        # --- tail of Open_File (def is above this chunk): cache the opened
        # image in the module-level Multi* dictionaries.
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source; statement order and tokens are unchanged.
        else:
            height,width,channel=np.shape(Filersc)
        Filesize=(height,width)
        print('filesize:',height,width)
        RGBfile=cv2.cvtColor(Filersc,cv2.COLOR_BGR2RGB)
        Multiimage.update({filename:RGBfile})
        if ndim==2:
            Grayfile=np.copy(Filersc)
        else:
            Grayfile=cv2.cvtColor(Filersc,cv2.COLOR_BGR2Lab)
            Grayfile=cv2.cvtColor(Grayfile,cv2.COLOR_BGR2GRAY)
        #Grayfile=cv2.GaussianBlur(Grayfile,(3,3),cv2.BORDER_DEFAULT)
        #ostu=filters.threshold_otsu(Grayfile)
        #Grayfile=Grayfile.astype('float32')
        #Grayfile=Grayfile/ostu
        Grayimg=img(Filesize,Grayfile)
        RGBbands=np.zeros((channel,height,width))
        for j in range(channel):
            band=RGBfile[:,:,j]
            # zeros and NaNs are replaced with 1e-6 so the ratio-based color
            # indices computed later cannot divide by zero
            band=np.where(band==0,1e-6,band)
            nans=np.isnan(band)
            band[nans]=1e-6
            #ostu=filters.threshold_otsu(band)
            #band=band/ostu
            RGBbands[j,:,:]=band
        RGBimg=img(Filesize,RGBbands)
        tempdict={filename:RGBimg}
        Multiimagebands.update(tempdict)
        tempdict={filename:Grayfile}
        Multigray.update(tempdict)
        tempdict={filename:0}
        Multitype.update(tempdict)
        tempdict={filename:Grayimg}
        Multigraybands.update(tempdict)
    except:
        # NOTE(review): bare except silently maps every failure (including
        # programming errors) to "invalid image"; narrowing it would be safer.
        messagebox.showerror('Invalid Image Format','Cannot open '+filename)
        return False
    filenames.append(filename)
    return True

def Open_Map():
    """Ask the user for a CSV "map" file, record its cell layout in the
    globals ``mappath``/``elesize``/``labellist``, then re-render the
    counting output for the current file via ``showcounting``."""
    if proc_mode[proc_name].get()=='1':
        # batch mode delegates to the batch-processing module
        batchprocess.Open_batchfile()
        return
    global mappath,elesize,labellist
    filepath=filedialog.askopenfilename()
    if len(filepath)>0:
        if 'csv' in filepath:
            mappath=filepath
            elesize=[]
            labellist=[]
            rows=[]
            print('open map at: '+mappath)
            with open(mappath,mode='r',encoding='utf-8-sig') as f:
                csvreader=csv.reader(f)
                for row in csvreader:
                    rows.append(row)
                    temprow=[]
                    for ele in row:
                        # NOTE(review): identity comparison with a literal
                        # ('is not') is a SyntaxWarning on modern CPython and
                        # should be != ; left unchanged here.
                        if ele is not '':
                            temprow.append(ele)
                    elesize.append(len(temprow))
            for i in range(len(rows)):
                for j in range(len(rows[i])):
                    if rows[i][j]!='':
                        labellist.append(rows[i][j])
        else:
            messagebox.showerror('Invalide File',message='Please open csv formate file as map file.')
        corlortable=tkintercorestat.get_colortable(reseglabels)
        tup=(reseglabels,[],corlortable,{},currentfilename)
        print(elesize)
        mapdict,mapimage,smallset=showcounting(tup,True,True,True)
        tempimgbands={}
        tempimgdict={}
        tempsmall={}
        tempimgbands.update({'iter0':mapimage})
        tempimgdict.update({'iter0':mapdict})
        tempsmall.update({'iter0':smallset})
        outputimgdict.update({currentfilename:tempimgdict})
        outputimgbands.update({currentfilename:tempimgbands})
        outputsegbands.update({currentfilename:tempsmall})
        changeoutputimg(currentfilename,'1')

def Open_Multifile():
    """Prompt for one or more image files, reset all per-session state,
    load each file (``Open_File`` + ``singleband``), then run the initial
    PCA/kmeans pass and re-enable the relevant GUI controls."""
    global extractbutton,outputbutton
    if proc_mode[proc_name].get()=='1':
        batchprocess.Open_batchfolder()
        extractbutton.config(state=NORMAL)
        outputbutton.config(state=NORMAL)
        return
    # else:
    #     extractbutton.config(state=DISABLED)
    global Multiimage,Multigray,Multitype,Multiimagebands,changefileframe,imageframe,Multigraybands,filenames
    global changefiledrop,filedropvar,originbandarray,displaybandarray,clusterdisplay,currentfilename,resviewframe
    global refsubframe,reseglabels,refbutton,figcanvas,loccanvas,originlabels,changekmeans,refarea
    global originlabeldict,convband,panelA
    global havecolorstrip
    global colordicesband,oldpcachoice
    global pccombinebar_up
    global displaylabels,displaypclabels
    global buttonvar
    global colorindicearray
    global selarea
    MULTIFILES=filedialog.askopenfilenames()
    root.update()
    if len(MULTIFILES)>0:
        # reset every cache/result from a previous session
        Multiimage={}
        Multigray={}
        Multitype={}
        Multiimagebands={}
        Multigraybands={}
        filenames=[]
        originbandarray={}
        colorindicearray={}
        displaybandarray={}
        clusterdisplay={}
        oldpcachoice=[]
        reseglabels=None
        originlabels=None
        originlabeldict=None
        #changekmeans=True
        convband=None
        refvar.set('0')
        kmeans.set('2')
        panelA.delete(ALL)
        panelA.unbind('<Button-1>')
        panelA.unbind('<Shift-Button-1>')
        refarea=None
        havecolorstrip=False
        displaypclabels=None
        buttonvar.set(0)
        # if 'NDI' in bandchoice:
        #     bandchoice['NDI'].set('1')
        # if 'NDVI' in bandchoice:
        #     bandchoice['NDVI'].set('1')
        refbutton.config(state=DISABLED)
        # selareabutton.configure(state=DISABLED)
        selarea.set('0')
        figcanvas.delete(ALL)
        #loccanvas=None
        for widget in refsubframe.winfo_children():
            widget.config(state=DISABLED)
        #for widget in resviewframe.winfo_children():
        #    widget.config(state=DISABLED)
        if outputbutton is not None:
            outputbutton.config(state=DISABLED)
        for i in range(len(MULTIFILES)):
            # abort the whole load if any file fails to open
            if Open_File(MULTIFILES[i])==False:
                return
            generatedisplayimg(filenames[0])
            changedisplayimg(imageframe,'Origin')
            # imageframe.update()
            # raise NameError
            # yield
            # thread=threading.Thread(target=singleband,args=(MULTIFILES[i],))
            singleband(MULTIFILES[i])
            # thread.start()
            # thread.join()
        for widget in changefileframe.winfo_children():
            widget.pack_forget()
        currentfilename=filenames[0]
        # filedropvar.set(filenames[0])
        # changefiledrop=OptionMenu(changefileframe,filedropvar,*filenames,command=partial(changeimage,imageframe))
        # changefiledrop.pack()
        #singleband(filenames[0])
        generatedisplayimg(filenames[0])
        # changedisplayimg(imageframe,'Origin')
        getPCs()
        if len(bandchoice)>0:
            for i in range(len(cluster)):
                bandchoice[cluster[i]].set('')
        #changedisplayimg(imageframe,'Origin')
        kmeans.set(1)
        #reshapemodified_tif=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0]*displaybandarray[currentfilename]['LabOstu'].shape[1],3))
        #colordicesband=kmeansclassify(['LabOstu'],reshapemodified_tif)
        displaylabels=kmeansclassify()
        generateimgplant('')
        changedisplayimg(imageframe,'Origin')
        # if len(bandchoice)>0:
        #     bandchoice['LabOstu'].set('1')
        global buttondisplay,pcaframe,kmeansbar
        for widget in buttondisplay.winfo_children():
            widget.config(state=NORMAL)
        # for widget in pcaframe.winfo_children():
        # for widget in pcselframe.winfo_children():
        #     widget.config(state=NORMAL)
        extractbutton.config(state=NORMAL)
        kmeansbar.state(["!disabled"])
        pccombinebar_up.state(["!disabled"])

def fillpartialbands(vector,vectorindex,band,filter_vector):
    """Add *band* (already restricted to the filter's nonzero pixels) into
    column *vectorindex* of *vector*, only at rows where *filter_vector*
    is nonzero."""
    nonzero=np.where(filter_vector!=0)
    vector[nonzero,vectorindex]=vector[nonzero,vectorindex]+band

def fillbands(originbands,displaybands,vector,vectorindex,name,band,filter=0):
    """Cache *band* under *name* (first call only), resize it to the display
    resolution, optionally multiply by a pixel *filter* mask, and add the
    flattened result into column *vectorindex* of *vector*.

    ``filter=0`` (the int default) means "no mask"; any non-int filter is
    treated as a multiplicative mask array."""
    tempdict={name:band}
    if isinstance(filter,int):
        if name not in originbands:
            originbands.update(tempdict)
            image=cv2.resize(band,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
            displaydict={name:image}
            displaybands.update(displaydict)
            fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
            vector[:,vectorindex]=vector[:,vectorindex]+fea_bands
    else:
        if name not in originbands:
            originbands.update(tempdict)
            image=cv2.resize(band,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
            image=np.multiply(image,filter)
            displaydict={name:image}
            displaybands.update(displaydict)
            fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
            vector[:,vectorindex]=vector[:,vectorindex]+fea_bands
    return

def plot3d(pcas):
    """Save a 3-D scatter plot of the first three color-index PCs
    (projected onto the three axis planes) to ``3dplot_PC.png``."""
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
    import matplotlib.pyplot as plt
    fig=plt.figure()
    ax=fig.add_subplot(111,projection='3d')
    x=pcas[:,0]
    y=pcas[:,1]
    z=pcas[:,2]*0+np.min(pcas[:,2])
    ax.scatter(x,y,z,color='tab:purple')
    x=pcas[:,0]*0+np.min(pcas[:,0])
    y=pcas[:,1]
    z=pcas[:,2]
    ax.scatter(x,y,z,color='tab:pink')
    x=pcas[:,0]
    y=pcas[:,1]*0+np.max(pcas[:,1])
    z=pcas[:,2]
    ax.scatter(x,y,z,color='tab:olive')
    ax.set_xlabel('Color Indices PC1')
    ax.set_ylabel('Color Indices PC2')
    ax.set_zlabel('Color Indices PC3')
    # plt.show()
    plt.savefig('3dplot_PC.png')

def partialoneband(filter):
    """Single-band variant of the masked feature build: duplicates band 0
    into R/G/B and all 12 color-index slots, restricted to the nonzero
    pixels of *filter*.  (Continues in the next chunk.)"""
    global displaybandarray,originpcabands
    global pcbuttons
    global nonzero_vector,partialpca
    partialpca=True
    bands=Multiimagebands[currentfilename].bands
    channel,fea_l,fea_w=bands.shape
    nonzero=np.where(filter!=0)
    RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
    colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
    filter_vector=filter.reshape((displayfea_l*displayfea_w),1)[:,0]
    originbands={}
    displays={}
    Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
    Green=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
    #
Red=cv2.adaptiveThreshold(Red,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2) # Green=cv2.adaptiveThreshold(Green,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2) Blue=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero] # Blue=cv2.threshold(Blue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) fillpartialbands(RGB_vector,0,Red,filter_vector) fillpartialbands(RGB_vector,1,Green,filter_vector) fillpartialbands(RGB_vector,2,Blue,filter_vector) PAT_R=Red PAT_G=Red PAT_B=Red ROO_R=Red ROO_G=Red ROO_B=Red DIF_R=Red DIF_G=Red DIF_B=Red GLD_R=Red GLD_G=Red GLD_B=Red fillpartialbands(colorindex_vector,0,PAT_R,filter_vector) fillpartialbands(colorindex_vector,1,PAT_G,filter_vector) fillpartialbands(colorindex_vector,2,PAT_B,filter_vector) fillpartialbands(colorindex_vector,3,ROO_R,filter_vector) fillpartialbands(colorindex_vector,4,ROO_G,filter_vector) fillpartialbands(colorindex_vector,5,ROO_B,filter_vector) fillpartialbands(colorindex_vector,6,DIF_R,filter_vector) fillpartialbands(colorindex_vector,7,DIF_G,filter_vector) fillpartialbands(colorindex_vector,8,DIF_B,filter_vector) fillpartialbands(colorindex_vector,9,GLD_R,filter_vector) fillpartialbands(colorindex_vector,10,GLD_G,filter_vector) fillpartialbands(colorindex_vector,11,GLD_B,filter_vector) nonzero_vector=np.where(filter_vector!=0) displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1) featurechannel=14 # np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f') # displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1) originpcabands.update({currentfilename:displayfea_vector}) pcabandsdisplay=displayfea_vector[:,:14] pcabandsdisplay=pcabandsdisplay.reshape(displayfea_l,displayfea_w,featurechannel) tempdictdisplay={'LabOstu':pcabandsdisplay} displaybandarray.update({currentfilename:tempdictdisplay}) # originbandarray.update({currentfilename:originbands}) # Red=displays['Band1'] # Green=displays['Band2'] # 
Blue=displays['Band3'] # convimg=np.zeros((Red.shape[0],Red.shape[1],3)) # convimg[:,:,0]=Red # convimg[:,:,1]=Green # convimg[:,:,2]=Blue # convimg=Image.fromarray(convimg.astype('uint8')) # convimg.save('convimg.png','PNG') pcbuttons=[] need_w=int(450/3) need_h=int(400/4) for i in range(2,3): band=np.copy(pcabandsdisplay[:,:,i]) # imgband=(band-band.min())*255/(band.max()-band.min()) imgband=np.copy(band) pcimg=Image.fromarray(imgband.astype('uint8'),'L') # pcimg.save('pc'+'_'+str(i)+'.png',"PNG") pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS) # pcimg.save('pc'+'_'+str(i)+'.png',"PNG") # ratio=max(displayfea_l/need_h,displayfea_w/need_w) # print('origin band range',band.max(),band.min()) # # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)}) # band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR) # bandrange=band.max()-band.min() # print('band range',band.max(),band.min()) # band=(band-band.min())/bandrange*255 # print('button img range',band.max(),band.min()) # buttonimg=Image.fromarray(band.astype('uint8'),'L') pcbuttons.append(ImageTk.PhotoImage(pcimg)) def partialsingleband(filter): global displaybandarray,originpcabands global pcbuttons global nonzero_vector,partialpca partialpca=True bands=Multiimagebands[currentfilename].bands channel,fea_l,fea_w=bands.shape nonzero=np.where(filter!=0) RGB_vector=np.zeros((displayfea_l*displayfea_w,3)) colorindex_vector=np.zeros((displayfea_l*displayfea_w,12)) filter_vector=filter.reshape((displayfea_l*displayfea_w),1)[:,0] originbands={} displays={} if channel==1: # Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero] # Green=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero] # Blue=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero] # fillpartialbands(RGB_vector,0,Red,filter_vector) # fillpartialbands(RGB_vector,1,Green,filter_vector) # 
        # fillpartialbands(RGB_vector,2,Blue,filter_vector)
        partialoneband(filter)
        return
    else:
        Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
        Green=cv2.resize(bands[1,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
        Blue=cv2.resize(bands[2,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
        fillpartialbands(RGB_vector,0,Red,filter_vector)
        fillpartialbands(RGB_vector,1,Green,filter_vector)
        fillpartialbands(RGB_vector,2,Blue,filter_vector)
    # 12 derived color indices (ratio, difference, golden-ratio power forms).
    # NOTE(review): unlike singleband(), these denominators have no +1e-6
    # guard — a zero pixel surviving the mask would divide by zero here.
    PAT_R=Red/(Red+Green)
    PAT_G=Green/(Green+Blue)
    PAT_B=Blue/(Blue+Red)
    ROO_R=Red/Green
    ROO_G=Green/Blue
    ROO_B=Blue/Red
    DIF_R=2*Red-Green-Blue
    DIF_G=2*Green-Blue-Red
    DIF_B=2*Blue-Red-Green
    GLD_R=Red/(np.multiply(np.power(Blue,0.618),np.power(Green,0.382)))
    GLD_G=Green/(np.multiply(np.power(Blue,0.618),np.power(Red,0.382)))
    GLD_B=Blue/(np.multiply(np.power(Green,0.618),np.power(Red,0.382)))
    fillpartialbands(colorindex_vector,0,PAT_R,filter_vector)
    fillpartialbands(colorindex_vector,1,PAT_G,filter_vector)
    fillpartialbands(colorindex_vector,2,PAT_B,filter_vector)
    fillpartialbands(colorindex_vector,3,ROO_R,filter_vector)
    fillpartialbands(colorindex_vector,4,ROO_G,filter_vector)
    fillpartialbands(colorindex_vector,5,ROO_B,filter_vector)
    fillpartialbands(colorindex_vector,6,DIF_R,filter_vector)
    fillpartialbands(colorindex_vector,7,DIF_G,filter_vector)
    fillpartialbands(colorindex_vector,8,DIF_B,filter_vector)
    fillpartialbands(colorindex_vector,9,GLD_R,filter_vector)
    fillpartialbands(colorindex_vector,10,GLD_G,filter_vector)
    fillpartialbands(colorindex_vector,11,GLD_B,filter_vector)
    # winsorize each feature to its [1st, 99th] percentile range
    for i in range(12):
        perc=np.percentile(colorindex_vector[:,i],1)
        print('perc',perc)
        colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
        perc=np.percentile(colorindex_vector[:,i],99)
        print('perc',perc)
        colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
    for i in range(3):
        perc=np.percentile(RGB_vector[:,i],1)
        print('perc',perc)
        RGB_vector[:,i]=np.where(RGB_vector[:,i]<perc,perc,RGB_vector[:,i])
        perc=np.percentile(RGB_vector[:,i],99)
        print('perc',perc)
        RGB_vector[:,i]=np.where(RGB_vector[:,i]>perc,perc,RGB_vector[:,i])
    # manual PCA on the masked pixels only: center, standardize, then
    # eigendecompose the correlation matrices of the two feature groups
    nonzero_vector=np.where(filter_vector!=0)
    rgb_M=np.mean(RGB_vector[nonzero_vector,:].T,axis=1)
    colorindex_M=np.mean(colorindex_vector[nonzero_vector,:].T,axis=1)
    print('rgb_M',rgb_M,'colorindex_M',colorindex_M)
    rgb_C=RGB_vector[nonzero_vector,:][0]-rgb_M.T
    colorindex_C=colorindex_vector[nonzero_vector,:][0]-colorindex_M.T
    rgb_V=np.corrcoef(rgb_C.T)
    color_V=np.corrcoef(colorindex_C.T)
    nans=np.isnan(color_V)
    color_V[nans]=1e-6
    rgb_std=rgb_C/(np.std(RGB_vector[nonzero_vector,:].T,axis=1)).T
    color_std=colorindex_C/(np.std(colorindex_vector[nonzero_vector,:].T,axis=1)).T
    nans=np.isnan(color_std)
    color_std[nans]=1e-6
    rgb_eigval,rgb_eigvec=np.linalg.eig(rgb_V)
    color_eigval,color_eigvec=np.linalg.eig(color_V)
    print('rgb_eigvec',rgb_eigvec)
    print('color_eigvec',color_eigvec)
    # PCs 0-8 come from the color indices, PCs 9-11 from RGB
    featurechannel=12
    pcabands=np.zeros((colorindex_vector.shape[0],featurechannel))
    rgbbands=np.zeros((colorindex_vector.shape[0],3))
    for i in range(0,9):
        pcn=color_eigvec[:,i]
        pcnbands=np.dot(color_std,pcn)
        pcvar=np.var(pcnbands)
        print('color index pc',i+1,'var=',pcvar)
        pcabands[nonzero_vector,i]=pcabands[nonzero_vector,i]+pcnbands
    for i in range(9,12):
        pcn=rgb_eigvec[:,i-9]
        pcnbands=np.dot(rgb_std,pcn)
        pcvar=np.var(pcnbands)
        print('rgb pc',i-9+1,'var=',pcvar)
        pcabands[nonzero_vector,i]=pcabands[nonzero_vector,i]+pcnbands
        rgbbands[nonzero_vector,i-9]=rgbbands[nonzero_vector,i-9]+pcnbands
    # plot3d(pcabands)
    # np.savetxt('rgb.csv',rgbbands,delimiter=',',fmt='%10.5f')
    # pcabands[:,1]=np.copy(pcabands[:,1])
    # pcabands[:,2]=pcabands[:,2]*0
    # indexbands=np.zeros((colorindex_vector.shape[0],3))
    # if i<5:
    #     indexbands[:,i-2]=indexbands[:,i-2]+pcnbands
    # winsorize the PC scores as well
    for i in range(12):
        perc=np.percentile(pcabands[:,i],1)
        print('perc',perc)
        pcabands[:,i]=np.where(pcabands[:,i]<perc,perc,pcabands[:,i])
        perc=np.percentile(pcabands[:,i],99)
        print('perc',perc)
        pcabands[:,i]=np.where(pcabands[:,i]>perc,perc,pcabands[:,i])
    '''save to csv'''
    # indexbands[:,0]=indexbands[:,0]+pcabands[:,2]
    # indexbands[:,1]=indexbands[:,1]+pcabands[:,3]
    # indexbands[:,2]=indexbands[:,2]+pcabands[:,4]
    # plot3d(indexbands)
    # np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%10.5f')
    displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
    # np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
    # displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
    originpcabands.update({currentfilename:displayfea_vector})
    pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
    tempdictdisplay={'LabOstu':pcabandsdisplay}
    displaybandarray.update({currentfilename:tempdictdisplay})
    # originbandarray.update({currentfilename:originbands})
    # Red=displays['Band1']
    # Green=displays['Band2']
    # Blue=displays['Band3']
    # convimg=np.zeros((Red.shape[0],Red.shape[1],3))
    # convimg[:,:,0]=Red
    # convimg[:,:,1]=Green
    # convimg[:,:,2]=Blue
    # convimg=Image.fromarray(convimg.astype('uint8'))
    # convimg.save('convimg.png','PNG')
    # build one thumbnail button image per PC channel
    pcbuttons=[]
    need_w=int(450/3)
    need_h=int(400/4)
    for i in range(12):
        band=np.copy(pcabandsdisplay[:,:,i])
        imgband=(band-band.min())*255/(band.max()-band.min())
        pcimg=Image.fromarray(imgband.astype('uint8'),'L')
        # pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
        pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
        # pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
        # ratio=max(displayfea_l/need_h,displayfea_w/need_w)
        # print('origin band range',band.max(),band.min())
        # # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
        # band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
        # bandrange=band.max()-band.min()
        # print('band range',band.max(),band.min())
        # band=(band-band.min())/bandrange*255
        # print('button img range',band.max(),band.min())
        #
        # buttonimg=Image.fromarray(band.astype('uint8'),'L')
        pcbuttons.append(ImageTk.PhotoImage(pcimg))

def oneband(file):
    """Unmasked feature build for a single-channel image: band 0 is
    duplicated into the 3 RGB and 12 color-index columns (featurechannel
    stays 14, no PCA), caches go into the module-level dictionaries, and
    only channel 2 gets a thumbnail button."""
    global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
    global pcbuttons
    global partialpca
    partialpca=False
    try:
        bands=Multiimagebands[file].bands
    except:
        # NOTE(review): silently skips files that were never loaded
        return
    pcbuttons=[]
    channel,fea_l,fea_w=bands.shape
    print('bandsize',fea_l,fea_w)
    # downscale very large images so the display vectors stay manageable
    if fea_l*fea_w>2000*2000:
        ratio=findratio([fea_l,fea_w],[2000,2000])
    else:
        ratio=1
    print('ratio',ratio)
    originbands={}
    displays={}
    displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
    displayfea_l,displayfea_w=displaybands.shape
    RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
    colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
    Red=bands[0,:,:].astype('uint8')
    # _,Red=cv2.threshold(Red,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    Green=bands[0,:,:].astype('uint8')
    # _,Green=cv2.threshold(Green,0,255,cv2.THRESH_OTSU)
    Blue=bands[0,:,:].astype('uint8')
    # _,Blue=cv2.threshold(Blue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    fillbands(originbands,displays,RGB_vector,0,'Band1',Red)
    fillbands(originbands,displays,RGB_vector,1,'Band2',Green)
    fillbands(originbands,displays,RGB_vector,2,'Band3',Blue)
    PAT_R=bands[0,:,:].astype('uint8')
    # PAT_R=cv2.adaptiveThreshold(PAT_R,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    PAT_G=bands[0,:,:]
    # PAT_G=cv2.adaptiveThreshold(PAT_G,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
    PAT_B=bands[0,:,:]
    ROO_R=bands[0,:,:]
    ROO_G=bands[0,:,:]
    ROO_B=bands[0,:,:]
    DIF_R=bands[0,:,:]
    DIF_G=bands[0,:,:]
    DIF_B=bands[0,:,:]
    GLD_R=bands[0,:,:]
    GLD_G=bands[0,:,:]
    GLD_B=bands[0,:,:]
    fillbands(originbands,displays,colorindex_vector,0,'PAT_R',PAT_R)
    fillbands(originbands,displays,colorindex_vector,1,'PAT_G',PAT_G)
    fillbands(originbands,displays,colorindex_vector,2,'PAT_B',PAT_B)
    fillbands(originbands,displays,colorindex_vector,3,'ROO_R',ROO_R)
    fillbands(originbands,displays,colorindex_vector,4,'ROO_G',ROO_G)
    fillbands(originbands,displays,colorindex_vector,5,'ROO_B',ROO_B)
    fillbands(originbands,displays,colorindex_vector,6,'DIF_R',DIF_R)
    fillbands(originbands,displays,colorindex_vector,7,'DIF_G',DIF_G)
    fillbands(originbands,displays,colorindex_vector,8,'DIF_B',DIF_B)
    fillbands(originbands,displays,colorindex_vector,9,'GLD_R',GLD_R)
    fillbands(originbands,displays,colorindex_vector,10,'GLD_G',GLD_G)
    fillbands(originbands,displays,colorindex_vector,11,'GLD_B',GLD_B)
    displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
    # np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
    featurechannel=14
    originpcabands.update({file:displayfea_vector})
    # pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
    # pcabandsdisplay=np.concatenate((RGB_vector,colorindex_vector),axis=2)
    pcabandsdisplay=displayfea_vector[:,:14]
    pcabandsdisplay=pcabandsdisplay.reshape(displayfea_l,displayfea_w,featurechannel)
    tempdictdisplay={'LabOstu':pcabandsdisplay}
    displaybandarray.update({file:tempdictdisplay})
    originbandarray.update({file:originbands})
    # Red=displays['Band1']
    # Green=displays['Band2']
    # Blue=displays['Band3']
    # convimg=np.zeros((Red.shape[0],Red.shape[1],3))
    # convimg[:,:,0]=Red
    # convimg[:,:,1]=Green
    # convimg[:,:,2]=Blue
    # convimg=Image.fromarray(convimg.astype('uint8'))
    # convimg.save('convimg.png','PNG')
    need_w=int(450/3)
    need_h=int(400/4)
    for i in range(2,3):
        band=np.copy(pcabandsdisplay[:,:,i])
        # band=np.copy(Red)
        # imgband=(band-band.min())*255/(band.max()-band.min())
        imgband=np.copy(band)
        pcimg=Image.fromarray(imgband.astype('uint8'),'L')
        # pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
        pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
        # pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
        # ratio=max(displayfea_l/need_h,displayfea_w/need_w)
        # print('origin band range',band.max(),band.min())
        # # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
        # band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
        # bandrange=band.max()-band.min()
        # print('band range',band.max(),band.min())
        # band=(band-band.min())/bandrange*255
        # print('button img range',band.max(),band.min())
        # buttonimg=Image.fromarray(band.astype('uint8'),'L')
        pcbuttons.append(ImageTk.PhotoImage(pcimg))

def singleband(file):
    """Full (unmasked) feature build for *file*: RGB + 12 color indices,
    percentile clipping, per-group PCA, display caches, and thumbnail
    buttons.  Delegates to ``oneband`` for single-channel images.
    (Continues in the next chunks.)"""
    global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
    global pcbuttons
    global partialpca
    partialpca=False
    try:
        bands=Multiimagebands[file].bands
    except:
        return
    pcbuttons=[]
    channel,fea_l,fea_w=bands.shape
    print('bandsize',fea_l,fea_w)
    if fea_l*fea_w>2000*2000:
        ratio=findratio([fea_l,fea_w],[2000,2000])
    else:
        ratio=1
    print('ratio',ratio)
    originbands={}
    displays={}
    displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
    # displaybands=np.copy(bands[0,:,:])
    displayfea_l,displayfea_w=displaybands.shape
    # displayfea_l,displayfea_w=fea_l,fea_w
    print(displayfea_l,displayfea_w)
    RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
    colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
    if channel==1:
        # Red=bands[0,:,:]
        # Green=bands[0,:,:]
        # Blue=bands[0,:,:]
        oneband(file)
        return
    else:
        Red=bands[0,:,:]
        Green=bands[1,:,:]
        Blue=bands[2,:,:]
    fillbands(originbands,displays,RGB_vector,0,'Band1',Red)
    fillbands(originbands,displays,RGB_vector,1,'Band2',Green)
    fillbands(originbands,displays,RGB_vector,2,'Band3',Blue)
    # import matplotlib.pyplot as plt
    # fig,axs=plt.subplots(1,3)
    # for i in range(3):
    #     minpc2=np.min(RGB_vector[:,i])
    #     maxpc2=np.max(RGB_vector[:,i])
    #     print(minpc2,maxpc2)
    #     bins=range(int(minpc2),int(maxpc2),10)
    #     axs[i].hist(RGB_vector[:,i],bins,range=(minpc2,maxpc2))
    #     axs[i].set_title('RGBband_'+str(i+1))
    # # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
    # plt.show()
    # secondsmallest_R=np.partition(Red,1)[1][0]
    # secondsmallest_G=np.partition(Green,1)[1][0]
    # secondsmallest_B=np.partition(Blue,1)[1][0]
    #
    # Red=Red+secondsmallest_R
    #
    # Green=Green+secondsmallest_G
    # Blue=Blue+secondsmallest_B
    # Red=Red/255+1
    # Green=Green/255+1
    # Blue=Blue/255+1
    # 12 derived color indices; +1e-6 guards avoid division by zero
    PAT_R=Red/(Red+Green)
    PAT_G=Green/(Green+Blue)
    PAT_B=Blue/(Blue+Red)
    ROO_R=Red/(Green+1e-6)
    ROO_G=Green/(Blue+1e-6)
    ROO_B=Blue/(Red+1e-6)
    DIF_R=2*Red-Green-Blue
    DIF_G=2*Green-Blue-Red
    DIF_B=2*Blue-Red-Green
    GLD_R=Red/(np.multiply(np.power(Blue,0.618),np.power(Green,0.382))+1e-6)
    GLD_G=Green/(np.multiply(np.power(Blue,0.618),np.power(Red,0.382))+1e-6)
    GLD_B=Blue/(np.multiply(np.power(Green,0.618),np.power(Red,0.382))+1e-6)
    fillbands(originbands,displays,colorindex_vector,0,'PAT_R',PAT_R)
    fillbands(originbands,displays,colorindex_vector,1,'PAT_G',PAT_G)
    fillbands(originbands,displays,colorindex_vector,2,'PAT_B',PAT_B)
    fillbands(originbands,displays,colorindex_vector,3,'ROO_R',ROO_R)
    fillbands(originbands,displays,colorindex_vector,4,'ROO_G',ROO_G)
    fillbands(originbands,displays,colorindex_vector,5,'ROO_B',ROO_B)
    fillbands(originbands,displays,colorindex_vector,6,'DIF_R',DIF_R)
    fillbands(originbands,displays,colorindex_vector,7,'DIF_G',DIF_G)
    fillbands(originbands,displays,colorindex_vector,8,'DIF_B',DIF_B)
    fillbands(originbands,displays,colorindex_vector,9,'GLD_R',GLD_R)
    fillbands(originbands,displays,colorindex_vector,10,'GLD_G',GLD_G)
    fillbands(originbands,displays,colorindex_vector,11,'GLD_B',GLD_B)
    # for i in [5,11]:
    #     colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
    #     perc=np.percentile(colorindex_vector[:,i],99)
    #     print('perc',perc)
    #     colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
    #
    # for i in [0,1,3,4,9,10]:
    #     colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
    #     perc=np.percentile(colorindex_vector[:,i],90)
    #     print('perc',perc)
    #     colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
    # for i in [5,11]:
    #     colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
    #     perc=np.percentile(colorindex_vector[:,i],99)
    #     print('perc',perc)
    #     colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
    #
    # for i in [3,4,9,10]:
    #     colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
    #     perc=np.percentile(colorindex_vector[:,i],1)
    #     print('perc',perc)
    #     colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
    #     perc=np.percentile(colorindex_vector[:,i],99)
    #     print('perc',perc)
    #     colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
    #
    # for i in [0,1]:
    #     colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
    #     perc=np.percentile(colorindex_vector[:,i],2)
    #     print('perc',perc)
    #     colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
    # for i in [0,1,3,4,9,10]:
    #     colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
    # winsorize each feature column to its [1st, 99th] percentile range
    for i in range(12):
        perc=np.percentile(colorindex_vector[:,i],1)
        print('perc',perc)
        colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
        perc=np.percentile(colorindex_vector[:,i],99)
        print('perc',perc)
        colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
    for i in range(3):
        perc=np.percentile(RGB_vector[:,i],1)
        print('perc',perc)
        RGB_vector[:,i]=np.where(RGB_vector[:,i]<perc,perc,RGB_vector[:,i])
        perc=np.percentile(RGB_vector[:,i],99)
        print('perc',perc)
        RGB_vector[:,i]=np.where(RGB_vector[:,i]>perc,perc,RGB_vector[:,i])
    # import matplotlib.pyplot as plt
    # fig,axs=plt.subplots(4,3)
    # for i in range(12):
    #     minpc2=np.min(colorindex_vector[:,i])
    #     maxpc2=np.max(colorindex_vector[:,i])
    #     print(minpc2,maxpc2)
    #     # bins=range(int(minpc2),int(maxpc2)+1,10)
    #     axs[int(i/3),i%3].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
    #     axs[int(i/3),i%3].set_title('Colorindex_'+str(i+1))
    #     # axs[i].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
    #     # axs[i].set_title('Colorindex_'+str(i+1))
    # # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
    # plt.show()
    # manual PCA per feature group: center, standardize, eigendecompose the
    # correlation matrix, project onto the eigenvectors
    rgb_M=np.mean(RGB_vector.T,axis=1)
    colorindex_M=np.mean(colorindex_vector.T,axis=1)
    print('rgb_M',rgb_M,'colorindex_M',colorindex_M)
    rgb_C=RGB_vector-rgb_M
    colorindex_C=colorindex_vector-colorindex_M
    rgb_V=np.corrcoef(rgb_C.T)
    color_V=np.corrcoef(colorindex_C.T)
    nans=np.isnan(color_V)
    color_V[nans]=1e-6
    rgb_std=rgb_C/np.std(RGB_vector.T,axis=1)
    color_std=colorindex_C/np.std(colorindex_vector.T,axis=1)
    nans=np.isnan(color_std)
    color_std[nans]=1e-6
    rgb_eigval,rgb_eigvec=np.linalg.eig(rgb_V)
    color_eigval,color_eigvec=np.linalg.eig(color_V)
    print('rgb_eigvec',rgb_eigvec)
    print('color_eigvec',color_eigvec)
    # PCs 0-8 from the color indices, PCs 9-11 from RGB
    featurechannel=12
    pcabands=np.zeros((colorindex_vector.shape[0],featurechannel))
    rgbbands=np.zeros((colorindex_vector.shape[0],3))
    # plot3d(pcabands)
    # np.savetxt('rgb.csv',rgbbands,delimiter=',',fmt='%10.5f')
    # pcabands[:,1]=np.copy(pcabands[:,1])
    # pcabands[:,2]=pcabands[:,2]*0
    indexbands=np.zeros((colorindex_vector.shape[0],3))
    # for i in range(3,featurechannel):
    # csvpcabands=np.zeros((colorindex_vector.shape[0],15))
    for i in range(0,9):
        pcn=color_eigvec[:,i]
        pcnbands=np.dot(color_std,pcn)
        pcvar=np.var(pcnbands)
        print('color index pc',i+1,'var=',pcvar)
        pcabands[:,i]=pcabands[:,i]+pcnbands
        # if i<5:
        #     indexbands[:,i-2]=indexbands[:,i-2]+pcnbands
    for i in range(9,12):
        pcn=rgb_eigvec[:,i-9]
        pcnbands=np.dot(rgb_std,pcn)
        pcvar=np.var(pcnbands)
        print('rgb pc',i+1,'var=',pcvar)
        pcabands[:,i]=pcabands[:,i]+pcnbands
        rgbbands[:,i-9]=rgbbands[:,i-9]+pcnbands
    # for i in range(0,12):
    #     pcn=color_eigvec[:,i]
    #     pcnbands=np.dot(color_std,pcn)
    #     pcvar=np.var(pcnbands)
    #     print('csv color index pc',i+1,'var=',pcvar)
    #     csvpcabands[:,i]=csvpcabands[:,i]+pcnbands
    # for i in range(12,15):
    #     pcn=rgb_eigvec[:,i-12]
    #     pcnbands=np.dot(rgb_std,pcn)
    #     csvpcabands[:,i]=csvpcabands[:,i]+pcnbands
    # '''save to csv'''
    # indexbands[:,0]=indexbands[:,0]+pcabands[:,2]
    # indexbands[:,1]=indexbands[:,1]+pcabands[:,3]
    # indexbands[:,2]=indexbands[:,2]+pcabands[:,4]
    # plot3d(indexbands)
    # np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%10.5f')
    # minpc=np.min(pcabands)
    # # meanpc=np.mean(pcabands)
    # stdpc=np.std(pcabands)
    # print('meanpc',meanpc,'stdpc',stdpc)
    # pcabands=pcabands-meanpc/stdpc
    # import matplotlib.pyplot as plt
    # minpc2=np.min(pcabands[:,13])
    # maxpc2=np.max(pcabands[:,13])
    # print(minpc2,maxpc2)
    # bins=range(int(minpc2),int(maxpc2),10)
    # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
    # plt.show()
    # np.savetxt('pcs.csv',pcabands[:,3],delimiter=',',fmt='%10.5f')
    # winsorize the PC scores as well
    for i in range(12):
        perc=np.percentile(pcabands[:,i],1)
        print('perc',perc)
        pcabands[:,i]=np.where(pcabands[:,i]<perc,perc,pcabands[:,i])
        perc=np.percentile(pcabands[:,i],99)
        print('perc',perc)
        pcabands[:,i]=np.where(pcabands[:,i]>perc,perc,pcabands[:,i])
    # import matplotlib.pyplot as plt
    # fig,axs=plt.subplots(4,3)
    # for i in range(2,14):
    #     minpc2=np.min(pcabands[:,i])
    #     maxpc2=np.max(pcabands[:,i])
    #     print(minpc2,maxpc2)
    #     # bins=range(int(minpc2),int(maxpc2)+1,10)
    #     axs[int((i-2)/3),(i-2)%3].hist(pcabands[:,i],10,range=(minpc2,maxpc2))
    #     axs[int((i-2)/3),(i-2)%3].set_title('PC_'+str(i-2+1))
    #     # axs[i].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
    #     # axs[i].set_title('Colorindex_'+str(i+1))
    # # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
    # plt.show()
    # header=['R','G','B',
    #         'PAT_R','PAT_G','PAT_B',
    #         'DIF_R','DIF_G','DIF_B',
    #         'ROO_R','ROO_G','ROO_B',
    #         'GLD_R','GLD_G','GLD_B',]
    # displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
    # with open('color-index.csv','w') as f:
    #     writer=csv.writer(f)
    #     writer.writerow(header)
    #     for i in range(displayfea_vector.shape[0]):
    #         writer.writerow(list(displayfea_vector[i,:]))
    # np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
    displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
    originpcabands.update({file:displayfea_vector})
    pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
    tempdictdisplay={'LabOstu':pcabandsdisplay}
    # --- tail of singleband(): publish the caches and build the per-PC
    # thumbnail button images
    displaybandarray.update({file:tempdictdisplay})
    originbandarray.update({file:originbands})
    # Red=displays['Band1']
    # Green=displays['Band2']
    # Blue=displays['Band3']
    # convimg=np.zeros((Red.shape[0],Red.shape[1],3))
    # convimg[:,:,0]=Red
    # convimg[:,:,1]=Green
    # convimg[:,:,2]=Blue
    # convimg=Image.fromarray(convimg.astype('uint8'))
    # convimg.save('convimg.png','PNG')
    need_w=int(450/3)
    need_h=int(400/4)
    # pcdisplay=[3,4,5,6,7,8,9,10,11,0,1,2]
    # for i in range(2,featurechannel):
    for i in range(featurechannel):
        band=np.copy(pcabandsdisplay[:,:,i])
        # normalize each PC band to 0..255 for the preview thumbnail
        imgband=(band-band.min())*255/(band.max()-band.min())
        pcimg=Image.fromarray(imgband.astype('uint8'),'L')
        # pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10.
        pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
        # pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
        # ratio=max(displayfea_l/need_h,displayfea_w/need_w)
        # print('origin band range',band.max(),band.min())
        # # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
        # band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
        # bandrange=band.max()-band.min()
        # print('band range',band.max(),band.min())
        # band=(band-band.min())/bandrange*255
        # print('button img range',band.max(),band.min())
        # buttonimg=Image.fromarray(band.astype('uint8'),'L')
        pcbuttons.append(ImageTk.PhotoImage(pcimg))

def colorindices_cal(file):
    """Compute 7 classic vegetation indices (NDI, VEG, Greenness, CIVE,
    MExG, NDRB, NGRDI) for *file* and store them in the module-level
    ``colorindicearray`` cache."""
    global colorindicearray
    try:
        bands=Multiimagebands[file].bands
    except:
        return
    channel,fea_l,fea_w=bands.shape
    print('bandsize',fea_l,fea_w)
    if fea_l*fea_w>2000*2000:
        ratio=findratio([fea_l,fea_w],[2000,2000])
    else:
        ratio=1
    print('ratio',ratio)
    originbands={}
    displays={}
    # displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
    # displaybands=np.copy(bands[0,:,:])
    # displayfea_l,displayfea_w=displaybands.shape
    # displayfea_l,displayfea_w=fea_l,fea_w
    # NOTE(review): relies on the globals displayfea_l/displayfea_w set by a
    # previous singleband()/oneband() call for this session.
    print(displayfea_l,displayfea_w)
    colorindex_vector=np.zeros((displayfea_l*displayfea_w,7))
    if channel==1:
        Red=bands[0,:,:]
        Green=bands[0,:,:]
        Blue=bands[0,:,:]
    else:
        Red=bands[0,:,:]
        Green=bands[1,:,:]
        Blue=bands[2,:,:]
    # shift each band by its second-smallest value to keep denominators
    # strictly positive
    secondsmallest_R=np.partition(Red,1)[1][0]
    secondsmallest_G=np.partition(Green,1)[1][0]
    secondsmallest_B=np.partition(Blue,1)[1][0]
    Red=Red+secondsmallest_R
    Green=Green+secondsmallest_G
    Blue=Blue+secondsmallest_B
    NDI=128*((Green-Red)/(Green+Red)+1)
    VEG=Green/(np.power(Red,0.667)*np.power(Blue,(1-0.667)))
    Greenness=Green/(Green+Red+Blue)
    CIVE=0.44*Red+0.811*Green+0.385*Blue+18.7845
    MExG=1.262*Green-0.844*Red-0.311*Blue
    NDRB=(Red-Blue)/(Red+Blue)
    NGRDI=(Green-Red)/(Green+Red)
    fillbands(originbands,displays,colorindex_vector,0,'NDI',NDI)
    fillbands(originbands,displays,colorindex_vector,1,'VEG',VEG)
    fillbands(originbands,displays,colorindex_vector,2,'Greenness',Greenness)
    fillbands(originbands,displays,colorindex_vector,3,'CIVE',CIVE)
    fillbands(originbands,displays,colorindex_vector,4,'MExG',MExG)
    fillbands(originbands,displays,colorindex_vector,5,'NDRB',NDRB)
    fillbands(originbands,displays,colorindex_vector,6,'NGRDI',NGRDI)
    colorindicearray.update({file:originbands})

def singleband_oldversion(file):
    """Legacy feature build kept for reference: Otsu-normalized gray band
    plus NDI and raw RGB features.  (Continues past this chunk.)"""
    global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
    global pcbuttons
    try:
        bands=Multigraybands[file].bands
    except:
        return
    pcbuttons=[]
    bandsize=Multigraybands[file].size
    print('bandsize',bandsize)
    try:
        channel,height,width=bands.shape
    except:
        channel=0
    if channel>1:
        bands=bands[0,:,:]
    #bands=cv2.GaussianBlur(bands,(3,3),cv2.BORDER_DEFAULT)
    # scale the gray band by its Otsu threshold
    ostu=filters.threshold_otsu(bands)
    bands=bands.astype('float32')
    bands=bands/ostu
    #display purpose
    if bandsize[0]*bandsize[1]>2000*2000:
        ratio=findratio([bandsize[0],bandsize[1]],[2000,2000])
    else:
        ratio=1
    print('ratio',ratio)
    #if bandsize[0]*bandsize[1]>850*850:
    #    ratio=findratio([bandsize[0],bandsize[1]],[850,850])
    #else:
    #    ratio=1
    #ttestbands=np.copy(bands)
    #testdisplaybands=cv2.resize(ttestbands,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#testdisplaybands=cv2.resize(testdisplaybands,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR) #print('testdisplaybands size',testdisplaybands.size) #if bandsize[0]*bandsize[1]>850*850: # ratio=findratio([bandsize[0],bandsize[1]],[850,850]) #else: # ratio=1 originbands={} displays={} fea_l,fea_w=bands.shape # fea_vector=np.zeros((fea_l*fea_w,3)) pyplt.imsave('bands.png',bands) displaybands=cv2.resize(bands,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) pyplt.imsave('displaybands.png',displaybands) displayfea_l,displayfea_w=displaybands.shape fea_vector=np.zeros((displayfea_l*displayfea_w,3)) displayfea_vector=np.zeros((displayfea_l*displayfea_w,7)) colorfea_vector=np.zeros((displayfea_l*displayfea_w,7)) # originfea_vector=np.zeros((bandsize[0],bandsize[1],10)) # saveimg=np.copy(bands).astype('uint8') # pyplt.imsave('ostuimg.png',saveimg) if 'LabOstu' not in originbands: originbands.update({'LabOstu':bands}) fea_bands=bands.reshape(fea_l*fea_w,1)[:,0] # originfea_vector[:,9]=originfea_vector[:,0]+fea_bands displayfea_bands=displaybands.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,9]=fea_vector[:,0]+fea_bands displayfea_vector[:,6]=displayfea_vector[:,6]+displayfea_bands minv=displayfea_bands.min() maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 colorfea_vector[:,6]=colorfea_vector[:,6]+colorfeabands #displaybands=displaybands.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) #kernel=np.ones((2,2),np.float32)/4 #displaybands=np.copy(bands) displays.update({'LabOstu':displaybands}) #displaybandarray.update({'LabOstu':cv2.filter2D(displaybands,-1,kernel)}) bands=Multiimagebands[file].bands #for i in range(3): # bands[i,:,:]=cv2.GaussianBlur(bands[i,:,:],(3,3),cv2.BORDER_DEFAULT) NDI=128*((bands[1,:,:]-bands[0,:,:])/(bands[1,:,:]+bands[0,:,:])+1) tempdict={'NDI':NDI} # saveimg=np.copy(NDI).astype('uint8') # 
pyplt.imsave('NDIimg.png',saveimg) if 'NDI' not in originbands: originbands.update(tempdict) displaybands=cv2.resize(NDI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) fea_bands=NDI.reshape(fea_l*fea_w,1)[:,0] # originfea_vector[:,1]=originfea_vector[:,1]+fea_bands displayfea_bands=displaybands.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,1]=fea_vector[:,1]+fea_bands displayfea_vector[:,1]=displayfea_vector[:,1]+displayfea_bands minv=displayfea_bands.min() maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 colorfea_vector[:,1]=colorfea_vector[:,1]+colorfeabands #displaybands=np.copy(NDI) #kernel=np.ones((2,2),np.float32)/4 #displaydict={'NDI':cv2.filter2D(displaybands,-1,kernel)} displaydict={'NDI':displaybands} #displaydict=displaydict.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) displays.update(displaydict) Red=bands[0,:,:] Green=bands[1,:,:] Blue=bands[2,:,:] tempdict={'Band1':Red} # saveimg=np.zeros((bandsize[0],bandsize[1],3),'uint8') # saveimg[:,:,0]=np.copy(Red).astype('uint8') # pyplt.imsave('Redimg.png',saveimg) # saveimg=np.zeros((bandsize[0],bandsize[1],3),'uint8') # saveimg[:,:,1]=np.copy(Green).astype('uint8') # pyplt.imsave('Greenimg.png',saveimg) # saveimg=np.zeros((bandsize[0],bandsize[1],3),'uint8') # saveimg[:,:,2]=np.copy(Blue).astype('uint8') # pyplt.imsave('Blueimg.png',saveimg) if 'Band1' not in originbands: originbands.update(tempdict) image=cv2.resize(Red,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) displaydict={'Band1':image} displays.update(displaydict) # fea_bands=Red.reshape(fea_l*fea_w,1)[:,0] fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # originfea_vector[:,2]=originfea_vector[:,2]+fea_bands displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] fea_vector[:,0]=fea_vector[:,0]+fea_bands # 
displayfea_vector[:,2]=displayfea_vector[:,2]+displayfea_bands tempdict={'Band2':Green} if 'Band2' not in originbands: originbands.update(tempdict) image=cv2.resize(Green,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) displaydict={'Band2':image} displays.update(displaydict) # fea_bands=Green.reshape(fea_l*fea_w,1)[:,0] fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # originfea_vector[:,3]=originfea_vector[:,3]+fea_bands displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] fea_vector[:,1]=fea_vector[:,1]+fea_bands # displayfea_vector[:,3]=displayfea_vector[:,3]+displayfea_bands tempdict={'Band3':Blue} if 'Band3' not in originbands: originbands.update(tempdict) # originfea_vector[:,4]=originfea_vector[:,4]+Blue image=cv2.resize(Blue,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) displaydict={'Band3':image} displays.update(displaydict) # fea_bands=Blue.reshape(fea_l*fea_w,1)[:,0] fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] fea_vector[:,2]=fea_vector[:,2]+fea_bands # displayfea_vector[:,4]=displayfea_vector[:,4]+displayfea_bands Greenness = bands[1, :, :] / (bands[0, :, :] + bands[1, :, :] + bands[2, :, :]) tempdict = {'Greenness': Greenness} if 'Greenness' not in originbands: originbands.update(tempdict) # originfea_vector[:,5]=originfea_vector[:,5]+Greenness image=cv2.resize(Greenness,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) displaydict={'Greenness':image} #displaybandarray.update(worktempdict) displays.update(displaydict) fea_bands=Greenness.reshape(fea_l*fea_w,1)[:,0] displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,5]=fea_vector[:,5]+fea_bands displayfea_vector[:,2]=displayfea_vector[:,2]+displayfea_bands minv=displayfea_bands.min() 
maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 colorfea_vector[:,2]=colorfea_vector[:,2]+colorfeabands VEG=bands[1,:,:]/(np.power(bands[0,:,:],0.667)*np.power(bands[2,:,:],(1-0.667))) tempdict={'VEG':VEG} if 'VEG' not in originbands: originbands.update(tempdict) # originfea_vector[:,6]=originfea_vector[:,6]+VEG image=cv2.resize(VEG,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) kernel=np.ones((4,4),np.float32)/16 #displaybandarray.update({'LabOstu':}) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) worktempdict={'VEG':cv2.filter2D(image,-1,kernel)} displays.update(worktempdict) fea_bands=VEG.reshape(fea_l*fea_w,1)[:,0] displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,6]=fea_vector[:,6]+fea_bands displayfea_vector[:,3]=displayfea_vector[:,3]+displayfea_bands minv=displayfea_bands.min() maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 colorfea_vector[:,3]=colorfea_vector[:,3]+colorfeabands CIVE=0.441*bands[0,:,:]-0.811*bands[1,:,:]+0.385*bands[2,:,:]+18.78745 tempdict={'CIVE':CIVE} if 'CIVE' not in originbands: originbands.update(tempdict) # originfea_vector[:,7]=originfea_vector[:,7]+CIVE image=cv2.resize(CIVE,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) worktempdict={'CIVE':image} displays.update(worktempdict) fea_bands=CIVE.reshape(fea_l*fea_w,1)[:,0] displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,7]=fea_vector[:,7]+fea_bands displayfea_vector[:,4]=displayfea_vector[:,4]+displayfea_bands minv=displayfea_bands.min() maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 
colorfea_vector[:,4]=colorfea_vector[:,4]+colorfeabands MExG=1.262*bands[1,:,:]-0.884*bands[0,:,:]-0.311*bands[2,:,:] tempdict={'MExG':MExG} if 'MExG' not in originbands: originbands.update(tempdict) # originfea_vector[:,8]=originfea_vector[:,8]+MExG image=cv2.resize(MExG,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) worktempdict={'MExG':image} displays.update(worktempdict) fea_bands=MExG.reshape(fea_l*fea_w,1)[:,0] displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,8]=fea_vector[:,8]+fea_bands displayfea_vector[:,5]=displayfea_vector[:,5]+displayfea_bands minv=displayfea_bands.min() maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 colorfea_vector[:,5]=colorfea_vector[:,5]+colorfeabands NDVI=(bands[0,:,:]-bands[2,:,:])/(bands[0,:,:]+bands[2,:,:]) tempdict={'NDVI':NDVI} if 'NDVI' not in originbands: originbands.update(tempdict) # originfea_vector[:,0]=originfea_vector[:,9]+NDVI image=cv2.resize(NDVI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) worktempdict={'NDVI':image} displays.update(worktempdict) fea_bands=NDVI.reshape(fea_l*fea_w,1)[:,0] displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0] # fea_vector[:,0]=fea_vector[:,9]+fea_bands displayfea_vector[:,0]=displayfea_vector[:,0]+displayfea_bands minv=displayfea_bands.min() maxv=displayfea_bands.max() fearange=maxv-minv colorfeabands=displayfea_bands-minv colorfeabands=colorfeabands/fearange*255 colorfea_vector[:,0]=colorfea_vector[:,0]+colorfeabands NGRDI=(bands[1,:,:]-bands[0,:,:])/(bands[1,:,:]+bands[0,:,:]) tempdict={'NGRDI':NGRDI} if 'NGRDI' not in originbands: originbands.update(tempdict) 
image=cv2.resize(NGRDI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) worktempdict={'NGRDI':image} displays.update(worktempdict) if channel>=1: nirbands=Multigraybands[file].bands NDVI=(nirbands[0,:,:]-bands[1,:,:])/(nirbands[0,:,:]+bands[1,:,:]) tempdict={'NDVI':NDVI} #if 'NDVI' not in originbandarray: originbands.update(tempdict) image=cv2.resize(NDVI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR) #image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3)) worktempdict={'NDVI':image} displays.update(worktempdict) '''PCA part''' displayfea_vector=np.concatenate((fea_vector,displayfea_vector),axis=1) M=np.mean(displayfea_vector.T,axis=1) OM=np.mean(fea_vector.T,axis=1) print('M',M,'M shape',M.shape, 'OM',OM,'OM Shape',OM.shape) C=displayfea_vector-M OC=fea_vector-OM #max=np.max(C.T,axis=1) #print('MAX',max) #C=C/max print('C',C,'OC',OC) #V=np.cov(C.T) V=np.corrcoef(C.T) OV=np.corrcoef(OC.T) std=np.std(displayfea_vector.T,axis=1) O_std=np.std(fea_vector.T,axis=1) print(std,O_std) std_displayfea=C/std O_stddisplayfea=OC/O_std print(std_displayfea,O_stddisplayfea) #eigvalues,eigvectors=np.linalg.eig(V) #n,m=displayfea_vector.shape #C=np.dot(displayfea_vector.T,displayfea_vector)/(n-1) V_var=np.cov(std_displayfea.T) print('COV',V_var) print('COR',V) eigvalues=la.eigvals(V_var) #eigvalues=np.linalg.eigvals(C) print('eigvalue',eigvalues) idx=np.argsort(eigvalues) print('idx',idx) eigvalues,eigvectors=np.linalg.eig(V) print('eigvalue',eigvalues) print('eigvectors',eigvectors) eigvalueperc={} featurechannel=10 # for i in range(len(eigvalues)): # print('percentage',i,eigvalues[i]/sum(eigvalues)) # eigvalueperc.update({i:eigvalues[i]/sum(eigvalues)}) # #if eigvalues[i]>0: # featurechannel+=1 # o_eigenvalue,o_eigenvector=np.linalg.eig(OV) pcabands=np.zeros((displayfea_vector.shape[0],featurechannel)) # 
o_pcabands=np.zeros((fea_vector.shape[0],featurechannel)) pcavar={} # # # # # separate PCs # # for i in range(3): # # pcn=o_eigenvector[:,i] # # pcnbands=np.dot(O_stddisplayfea,pcn) # # pcvar=np.var(pcnbands) # # print('pc',i+1,' var=',pcvar) # # pcabands[:,i]=pcabands[:,i]+pcnbands # # for i in range(7): # # pcn=eigvectors[:,i] # # pcnbands=np.dot(std_displayfea,pcn) # # pcvar=np.var(pcnbands) # # print('pc',i+1,' var=',pcvar) # # temppcavar={i:pcvar} # # pcavar.update(temppcavar) # # pcabands[:,i+3]=pcabands[:,i+3]+pcnbands # # # # # combined PCs for i in range(featurechannel): pcn=eigvectors[:,i] # pcnbands=np.dot(std_displayfea,pcn) pcnbands=np.dot(C,pcn) pcvar=np.var(pcnbands) print('pc',i+1,' var=',pcvar) temppcavar={i:pcvar} pcavar.update(temppcavar) pcabands[:,i]=pcabands[:,i]+pcnbands # ''' NO PCA''' # colorfea_vector=np.concatenate((fea_vector,colorfea_vector),axis=1) # displayfea_vector=np.concatenate((fea_vector,displayfea_vector),axis=1) # M=np.mean(colorfea_vector.T,axis=1) # print('colorfea_vector M',M) # pcabands=np.copy(colorfea_vector) # featurechannel=10 '''Export to CSV''' # np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%s') # np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%s') #threedplot(pcabands) # originpcabands.update({file:o_pcabands}) originpcabands.update({file:displayfea_vector}) pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel) #originbands={'LabOstu':pcabandsdisplay} tempdictdisplay={'LabOstu':pcabandsdisplay} #displaybandarray.update({file:displays}) displaybandarray.update({file:tempdictdisplay}) originbandarray.update({file:originbands}) need_w=int(450/4) need_h=int(400/3) for i in range(featurechannel): band=np.copy(pcabandsdisplay[:,:,i]) ratio=max(displayfea_l/need_h,displayfea_w/need_w) band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)}) bandrange=band.max()-band.min() band=(band-band.min())/bandrange*255 
buttonimg=Image.fromarray(band.astype('uint8'),'L') pcbuttons.append(ImageTk.PhotoImage(buttonimg)) # buttonimg.save('pcbutton_'+str(i)+'.png',"PNG") # print('saved') from mpl_toolkits.mplot3d import Axes3D def threedplot(area): fig=pyplt.figure() ax=fig.add_subplot(111,projection='3d') n=100 xs=np.copy(area[0:n,0]) ys=np.copy(area[0:n,1]) zs=np.copy(area[0:n,3]) colors=("red","green","blue") groups=("PC1","PC2","PC3") #for c,l in [('r','o'),('g','^')]: ax.scatter(xs,ys,np.max(zs),c='r',marker='o') ax.scatter(xs,np.min(ys),zs,c='b',marker='^') ax.scatter(np.max(xs),ys,zs,c='g') ax.set_xlabel('PC1') ax.set_ylabel('PC2') ax.set_zlabel('PC3') pyplt.show() def changeimage(frame,filename): global clusterdisplay,currentfilename,resviewframe clusterdisplay={} currentfilename=filename print(filename) generatedisplayimg(filename) changedisplayimg(frame,'Origin') for key in cluster: tuplist=[] for i in range(len(cluster)): tuplist.append('') tup=tuple(tuplist) bandchoice[key].set(tup) #for key in cluster: # ch=ttk.Checkbutton(contentframe,text=key,variable=bandchoice[key],command=changecluster)#,command=partial(autosetclassnumber,clusternumberentry,bandchoice)) # ch.pack() if filename in multi_results.keys(): for widget in resviewframe.winfo_children(): widget.pack_forget() iternum=len(list(multi_results[filename][0].keys())) itervar=IntVar() itervar.set(iternum) resscaler=Scale(resviewframe,from_=1,to=iternum,tickinterval=1,length=220,orient=HORIZONTAL,variable=itervar,command=partial(changeoutputimg,filename)) resscaler.pack() outputbutton=Button(resviewframe,text='Export Results',command=partial(export_result,itervar)) outputbutton.pack() def generatecheckbox(frame,classnum): global checkboxdict,havecolorstrip changekmeansbar('') for widget in frame.winfo_children(): widget.pack_forget() checkboxdict={} havecolorstrip=False addcolorstrip() for i in range(10): dictkey=str(i+1) tempdict={dictkey:Variable()} tempdict[dictkey].set('0') checkboxdict.update(tempdict) 
ch=Checkbutton(checkboxframe,text=dictkey,variable=checkboxdict[dictkey],command=partial(changeclusterbox,''))#,command=partial(changecluster,'')) if i+1>int(kmeans.get()): ch.config(state=DISABLED) ch.pack(side=LEFT) #if i==0: # ch.invoke() #for i in range(int(classnum)): # dictkey='class '+str(i+1) # tempdict={dictkey:Variable()} # checkboxdict.update(tempdict) #ch=ttk.Checkbutton(frame,text=dictkey,command=partial(generateplant,checkboxdict,bandchoice,classnum),variable=checkboxdict[dictkey]) # ch=ttk.Checkbutton(frame,text=dictkey,command=changecluster,variable=checkboxdict[dictkey]) # ch.grid(row=int(i/3),column=int(i%3)) # if i==minipixelareaclass: # ch.invoke() def generateimgplant(event): global currentlabels,changekmeans,colordicesband,originbinaryimg,pre_checkbox colordicesband=np.copy(displaylabels) keys=checkboxdict.keys() plantchoice=[] pre_checkbox=[] for key in keys: plantchoice.append(checkboxdict[key].get()) pre_checkbox.append(checkboxdict[key].get()) origindisplaylabels=np.copy(displaybandarray[currentfilename]['LabOstu']) h,w,c=origindisplaylabels.shape # tempdisplayimg=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0], # displaybandarray[currentfilename]['LabOstu'].shape[1])) # colordivimg=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0], # displaybandarray[currentfilename]['LabOstu'].shape[1])) tempdisplayimg=np.zeros((h,w)) colordivimg=np.zeros((h,w)) sel_count=plantchoice.count('1') if sel_count == int(kmeans.get()): tempdisplayimg=tempdisplayimg+1 else: for i in range(int(kmeans.get())): tup=plantchoice[i] if '1' in tup: tempdisplayimg=np.where(displaylabels==i,1,tempdisplayimg) # uniquecolor=np.unique(tempdisplayimg) # if len(uniquecolor)==1 and uniquecolor[0]==1: # tempdisplayimg=np.copy(displaylabels).astype('float32') currentlabels=np.copy(tempdisplayimg) originbinaryimg=np.copy(tempdisplayimg) tempcolorimg=np.copy(displaylabels).astype('float32') # ratio=findratio([h,w],[850,850]) # if h*w<850*850: # 
tempdisplayimg=cv2.resize(tempdisplayimg,(int(w*ratio),int(h*ratio))) # colordivimg=cv2.resize(tempcolorimg,(int(w*ratio),int(h*ratio))) # if h>850: # ratio=round(h/850) # tempdisplayimg=cv2.resize(tempdisplayimg,(int(w/ratio),int(h/ratio))) # colordivimg=cv2.resize(tempcolorimg,(int(w/ratio),int(h/ratio))) # if w>850: # ratio=round(w/850) # tempdisplayimg=cv2.resize(tempdisplayimg,(int(w/ratio),int(h/ratio))) # colordivimg=cv2.resize(tempcolorimg,(int(w/ratio),int(h/ratio))) # else: # tempdisplayimg=cv2.resize(tempdisplayimg,(int(w/ratio),int(h/ratio))) # colordivimg=cv2.resize(tempcolorimg,(int(w/ratio),int(h/ratio))) # tempdisplayimg=cv2.resize(tempdisplayimg,(int(resizeshape[0]),int(resizeshape[1]))) # colordivimg=cv2.resize(tempcolorimg,(int(resizeshape[0]),int(resizeshape[1]))) colordivimg=np.copy(tempcolorimg) binaryimg=np.zeros((h,w,3)) kvar=int(kmeans.get()) locs=np.where(tempdisplayimg==1) binaryimg[locs]=[240,228,66] colordeimg=np.zeros((h,w,3)) # binarypreview=cv2.resize(binaryimg,(int(previewshape[0]),int(previewshape[1]))) binarypreview=np.copy(binaryimg) if kvar==1: if colordivimg.min()<0: # if abs(colordivimg.min())<colordivimg.max(): colordivimg=colordivimg-colordivimg.min() colorrange=colordivimg.max()-colordivimg.min() colordivimg=colordivimg*255/colorrange grayimg=Image.fromarray(colordivimg.astype('uint8'),'L') grayimg=grayimg.resize((int(resizeshape[0]),int(resizeshape[1]))) #grayimg.show() colordivdict={} colordivdict.update({'Size':[resizeshape[1],resizeshape[0]]}) colordivdict.update({'Image':ImageTk.PhotoImage(grayimg)}) displayimg['Color Deviation']=colordivdict colordivpreview={} # colordivpreimg=cv2.resize(colordivimg,(int(previewshape[0]),int(previewshape[1]))) graypreviewimg=Image.fromarray(colordivimg.astype('uint8'),'L') graypreviewimg=graypreviewimg.resize((int(previewshape[0]),int(previewshape[1]))) colordivpreview.update({'Size':[previewshape[1],previewshape[0]]}) 
colordivpreview.update({'Image':ImageTk.PhotoImage(graypreviewimg)}) previewimg['Color Deviation']=colordivpreview binaryimg=np.zeros((resizeshape[1],resizeshape[0],3)) tempdict={} tempdict.update({'Size':[resizeshape[1],resizeshape[0]]}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(binaryimg.astype('uint8')))}) displayimg['ColorIndices']=tempdict binarypreview=np.zeros((int(previewshape[1]),int(previewshape[0]))) tempdict={} tempdict.update({'Size':binarypreview.shape}) tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(binarypreview.astype('uint8')))}) previewimg['ColorIndices']=tempdict # changedisplayimg(imageframe,'Color Deviation') else: for i in range(kvar): locs=np.where(colordivimg==i) colordeimg[locs]=colorbandtable[i] #pyplt.imsave('displayimg.png',tempdisplayimg) #pyplt.imsave('allcolorindex.png',colordivimg) #bands=Image.fromarray(tempdisplayimg) #bands=bands.convert('L') #bands.save('displayimg.png') #indimg=cv2.imread('displayimg.png') colordeimg=Image.fromarray(colordeimg.astype('uint8')) colordeimg.save('allcolorindex.png',"PNG") binaryimg=Image.fromarray(binaryimg.astype('uint8')) binaryimg.save('binaryimg.png',"PNG") binaryimg=binaryimg.resize((int(resizeshape[0]),int(resizeshape[1]))) tempdict={} tempdict.update({'Size':[resizeshape[1],resizeshape[0]]}) tempdict.update({'Image':ImageTk.PhotoImage(binaryimg)}) displayimg['ColorIndices']=tempdict tempdict={} binaryimg=binaryimg.resize((int(previewshape[0]),int(previewshape[1]))) tempdict.update({'Size':[previewshape[1],previewshape[0]]}) tempdict.update({'Image':ImageTk.PhotoImage(binaryimg)}) previewimg['ColorIndices']=tempdict #indimg=cv2.imread('allcolorindex.png') #tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(indimg))}) # # colorimg=cv2.imread('allcolorindex.png') # Image.fromarray((binaryimg.astype('uint8'))).save('binaryimg.png',"PNG") colordeimg=colordeimg.resize((resizeshape[0],resizeshape[1])) colordivdict={} 
colordivdict.update({'Size':[resizeshape[1],resizeshape[0]]}) colordivdict.update({'Image':ImageTk.PhotoImage(colordeimg)}) displayimg['Color Deviation']=colordivdict colordivdict={} # colordeimgpre=cv2.resize(colordeimg,(int(previewshape[0]),int(previewshape[1]))) colordeimg=colordeimg.resize((previewshape[0],previewshape[1])) colordivdict.update({'Size':[previewshape[1],previewshape[0]]}) colordivdict.update({'Image':ImageTk.PhotoImage(colordeimg)}) previewimg['Color Deviation']=colordivdict # changedisplayimg(imageframe,'ColorIndices') # print('sel count',sel_count) if kvar>1: if sel_count==0: changedisplayimg(imageframe,'Color Deviation') else: changedisplayimg(imageframe,'ColorIndices') # changekmeans=True #def kmeansclassify(choicelist,reshapedtif): def kmeansclassify_oldversion(): global clusterdisplay #,minipixelareaclass if int(kmeans.get())==0: return #for i in range(len(choicelist)): # tempband=displaybandarray[currentfilename][choicelist[i]] #tempband=cv2.resize(tempband,(450,450),interpolation=cv2.INTER_LINEAR) # reshapedtif[:,i]=tempband.reshape(tempband.shape[0]*tempband.shape[1],2)[:,0] #if len(choicelist)==0: originpcabands=displaybandarray[currentfilename]['LabOstu'] pcah,pcaw,pcac=originpcabands.shape pcacount={} keys=list(pcaboxdict.keys()) for item in keys: if pcaboxdict[item].get()=='1': pcacount.update({item:pcaboxdict[item]}) pcakeys=list(pcacount.keys()) tempband=np.zeros((pcah,pcaw,len(pcakeys))) for i in range(len(pcakeys)): channel=int(pcakeys[i])-1 tempband[:,:,i]=tempband[:,:,i]+originpcabands[:,:,channel] if int(kmeans.get())==1: print('kmeans=1') displaylabels=np.mean(tempband,axis=2) pyplt.imsave('k=1.png',displaylabels) else: #tempband=displaybandarray[currentfilename]['LabOstu'] if int(kmeans.get())>1: h,w,c=tempband.shape print('shape',tempband.shape) reshapedtif=tempband.reshape(tempband.shape[0]*tempband.shape[1],c) print('reshape',reshapedtif.shape) 
clf=KMeans(n_clusters=int(kmeans.get()),init='k-means++',n_init=10,random_state=0) tempdisplayimg=clf.fit(reshapedtif) # print('label=0',np.any(tempdisplayimg==0)) displaylabels=tempdisplayimg.labels_.reshape((displaybandarray[currentfilename]['LabOstu'].shape[0], displaybandarray[currentfilename]['LabOstu'].shape[1])) clusterdict={} displaylabels=displaylabels+10 for i in range(int(kmeans.get())): locs=np.where(tempdisplayimg.labels_==i) maxval=reshapedtif[locs].max() print(maxval) clusterdict.update({maxval:i+10}) print(clusterdict) sortcluster=list(sorted(clusterdict)) print(sortcluster) for i in range(len(sortcluster)): cluster_num=clusterdict[sortcluster[i]] displaylabels=np.where(displaylabels==cluster_num,i,displaylabels) # pixelarea=1.0 # for i in range(int(kmeans.get())): # pixelloc=np.where(displaylabels==i) # pixelnum=len(pixelloc[0]) # temparea=float(pixelnum/(displaylabels.shape[0]*displaylabels.shape[1])) # if temparea<pixelarea: # #minipixelareaclass=i # pixelarea=temparea if kmeans.get() not in clusterdisplay: tempdict={kmeans.get():displaylabels} #clusterdisplay.update({''.join(choicelist):tempdict}) clusterdisplay.update(tempdict) return displaylabels def kmeansclassify(): global clusterdisplay,displaylabels if int(kmeans.get())==0: return originpcabands=displaybandarray[currentfilename]['LabOstu'] pcah,pcaw,pcac=originpcabands.shape pcpara=pc_combine_up.get() print(pcpara,type(pcpara)) tempband=np.zeros((pcah,pcaw,1)) # pcsel=buttonvar.get()+2 pcsel=buttonvar.get() pcweights=pc_combine_up.get()-0.5 if pcweights==0.0: tempband[:,:,0]=tempband[:,:,0]+originpcabands[:,:,pcsel] else: if pcweights<0.0: #RGBPC1 rgbpc=originpcabands[:,:,9] else: rgbpc=originpcabands[:,:,10] rgbpc=(rgbpc-rgbpc.min())*255/(rgbpc.max()-rgbpc.min()) firstterm=abs(pcweights)*2*rgbpc colorpc=originpcabands[:,:,pcsel] colorpc=(colorpc-colorpc.min())*255/(colorpc.max()-colorpc.min()) secondterm=(1-abs(pcweights)*2)*colorpc tempband[:,:,0]=tempband[:,:,0]+firstterm+secondterm if 
int(kmeans.get())==1: print('kmeans=1') displaylabels=np.mean(tempband,axis=2) pyplt.imsave('k=1.png',displaylabels) else: if int(kmeans.get())>1: h,w,c=tempband.shape print('shape',tempband.shape) reshapedtif=tempband.reshape(tempband.shape[0]*tempband.shape[1],c) if partialpca==True: partialshape=reshapedtif[nonzero_vector] print('partial reshape',partialshape.shape) clf=KMeans(n_clusters=int(kmeans.get()),init='k-means++',n_init=10,random_state=0) tempdisplayimg=clf.fit(partialshape) reshapedtif[nonzero_vector,0]=np.add(tempdisplayimg.labels_,1) print(reshapedtif[nonzero_vector]) displaylabels=reshapedtif.reshape((displaybandarray[currentfilename]['LabOstu'].shape[0], displaybandarray[currentfilename]['LabOstu'].shape[1])) # reshapedtif=cv2.resize(reshapedtif,(c,resizeshape[0]*resizeshape[1]),cv2.INTER_LINEAR) clusterdict={} displaylabels=displaylabels+10 for i in range(int(kmeans.get())): locs=np.where(tempdisplayimg.labels_==i) try: maxval=partialshape[locs].max() except: print('kmeans',i) messagebox.showerror('Cluster maximum value is ', i) return displaylabels print(maxval) clusterdict.update({maxval:i+11}) print(clusterdict) sortcluster=list(sorted(clusterdict)) print(sortcluster) for i in range(len(sortcluster)): cluster_num=clusterdict[sortcluster[i]] displaylabels=np.where(displaylabels==cluster_num,i,displaylabels) return displaylabels else: print('reshape',reshapedtif.shape) clf=KMeans(n_clusters=int(kmeans.get()),init='k-means++',n_init=10,random_state=0) tempdisplayimg=clf.fit(reshapedtif) # print('label=0',np.any(tempdisplayimg==0)) displaylabels=tempdisplayimg.labels_.reshape((displaybandarray[currentfilename]['LabOstu'].shape[0], displaybandarray[currentfilename]['LabOstu'].shape[1])) # displaylabels=tempdisplayimg.labels_.reshape((resizeshape[1],resizeshape[0])) clusterdict={} displaylabels=displaylabels+10 for i in range(int(kmeans.get())): locs=np.where(tempdisplayimg.labels_==i) maxval=reshapedtif[locs].max() print(maxval) 
clusterdict.update({maxval:i+10}) print(clusterdict) sortcluster=list(sorted(clusterdict)) print(sortcluster) for i in range(len(sortcluster)): cluster_num=clusterdict[sortcluster[i]] displaylabels=np.where(displaylabels==cluster_num,i,displaylabels) # if kmeans.get() not in clusterdisplay: # tempdict={kmeans.get():displaylabels} # #clusterdisplay.update({''.join(choicelist):tempdict}) # clusterdisplay.update(tempdict) return displaylabels def addcolorstrip(): global kmeanscanvasframe,havecolorstrip if havecolorstrip is False: colornum=int(kmeans.get()) for widget in kmeanscanvasframe.winfo_children(): widget.pack_forget() widget.delete(ALL) widget.config(width=350,height=10) widget.create_image(3,0,image=colorstripdict['colorstrip'+str(colornum)],anchor=NW) widget.pack() havecolorstrip=True def getPCs(): global displayimg,displaypclabels originpcabands=displaybandarray[currentfilename]['LabOstu'] pcah,pcaw,pcac=originpcabands.shape pcweights=pc_combine_up.get()-0.5 tempband=np.zeros((pcah,pcaw)) # pcsel=buttonvar.get()+2 pcsel=buttonvar.get() if pcweights==0.0: tempband=tempband+originpcabands[:,:,pcsel] else: if pcweights<0.0: #RGBPC1 rgbpc=originpcabands[:,:,9] else: rgbpc=originpcabands[:,:,10] rgbpc=(rgbpc-rgbpc.min())*255/(rgbpc.max()-rgbpc.min()) firstterm=abs(pcweights)*2*rgbpc colorpc=originpcabands[:,:,pcsel] colorpc=(colorpc-colorpc.min())*255/(colorpc.max()-colorpc.min()) secondterm=(1-abs(pcweights)*2)*colorpc tempband=tempband+firstterm+secondterm displaypclabels=np.copy(tempband) displaylabels=np.copy(tempband) pyplt.imsave('k=1.png',displaylabels) colordivimg=np.copy(displaylabels) print('origin pc range',colordivimg.max(),colordivimg.min()) # colordivimg=cv2.resize(tempcolorimg,(int(resizeshape[0]),int(resizeshape[1]))) print('pc range',colordivimg.max(),colordivimg.min()) if colordivimg.min()<0: colordivimg=colordivimg-colordivimg.min() colorrange=colordivimg.max()-colordivimg.min() colordivimg=(colordivimg)*255/colorrange 
colordivimg=Image.fromarray(colordivimg.astype('uint8'),'L') colordivimg=colordivimg.resize((int(resizeshape[0]),int(resizeshape[1])),Image.ANTIALIAS) displayimg['PCs']['Image']=ImageTk.PhotoImage(colordivimg) # displayimg['Color Deviation']['Image']=ImageTk.PhotoImage(colordivimg) def getPCs_olcversion(): global displayimg originpcabands=displaybandarray[currentfilename]['LabOstu'] pcah,pcaw,pcac=originpcabands.shape pcacount={} keys=list(pcaboxdict.keys()) for item in keys: if pcaboxdict[item].get()=='1': pcacount.update({item:pcaboxdict[item]}) pcakeys=list(pcacount.keys()) tempband=np.zeros((pcah,pcaw,len(pcakeys))) for i in range(len(pcakeys)): channel=int(pcakeys[i])-1 tempband[:,:,i]=tempband[:,:,i]+originpcabands[:,:,channel] # if int(kmeans.get())==1: print('kmeans=1') displaylabels=np.mean(tempband,axis=2) pyplt.imsave('k=1.png',displaylabels) ratio=findratio([originpcabands.shape[0],originpcabands.shape[1]],[screenstd,screenstd]) tempcolorimg=
np.copy(displaylabels)
numpy.copy
""" .. _multi-taper-coh: ================================ Multi-taper coherence estimation ================================ Coherence estimation can be done using windowed-spectra. This is the method used in the example :ref:`resting-state`. In addition, multi-taper spectral estimation can be used in order to calculate coherence and also confidence intervals for the coherence values that result (see :ref:`multi-taper-psd`) The data analyzed here is an fMRI data-set contributed by <NAME>. The data is taken from a single subject in a"resting-state" scan, in which subjects are fixating on a cross and maintaining alert wakefulness, but not performing any other behavioral task. We start by importing modules/functions we will use in this example and define variables which will be used as the sampling interval of the TimeSeries objects and as upper and lower bounds on the frequency range analyzed: """ import os import numpy as np import matplotlib.pyplot as plt from matplotlib.mlab import csv2rec import scipy.stats.distributions as dist from scipy import fftpack import nitime from nitime.timeseries import TimeSeries from nitime import utils import nitime.algorithms as alg import nitime.viz from nitime.viz import drawmatrix_channels from nitime.analysis import CoherenceAnalyzer, MTCoherenceAnalyzer TR = 1.89 f_ub = 0.15 f_lb = 0.02 """ We read in the data into a recarray from a csv file: """ data_path = os.path.join(nitime.__path__[0], 'data') data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv')) """ The first line in the file contains the names of the different brain regions (or ROI = regions of interest) from which the time-series were derived. We extract the data into a regular array, while keeping the names to be used later: """ roi_names =
np.array(data_rec.dtype.names)
numpy.array
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.

.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where rot
                                is the rotation matrix and tn/td are the
                                numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Precompute, once per space group, the two arrays used by
        # symmetryEquivalentMillerIndices(): the transposed rotation
        # matrices and the per-component phase factors
        # exp(-2*pi*i*tn/td) of each translation vector.
        rotations = []
        phases = []
        for rot, trans_num, trans_den in transformations:
            rotations.append(N.transpose(rot))
            phases.append((-2j * N.pi * trans_num) / trans_den)
        self.transposed_rotations = N.array(rotations)
        self.phase_factors = N.exp(N.array(phases))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to
                 the reflection hkl (including hkl itself as the first
                 element). phase_factor contains the phase factors that
                 must be applied to the structure factor of reflection
                 hkl to obtain the structure factor of the symmetry
                 equivalent reflection.
        :rtype: tuple
        """
        # One symmetry mate per row: rot^T applied to hkl.
        equivalents = N.dot(self.transposed_rotations, hkl)
        # Product over the three components gives
        # exp(-2*pi*i*(h*t1 + k*t2 + l*t3)) for each transformation.
        phases = N.multiply.reduce(self.phase_factors ** hkl, -1)
        return equivalents, phases


space_groups = {}

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(5, 'C 1 2 1', transformations) space_groups[5] = sg space_groups['C 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(6, 'P 1 m 1', transformations) space_groups[6] = sg space_groups['P 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(7, 'P 1 c 1', transformations) space_groups[7] = sg space_groups['P 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 
1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) 
trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num 
= N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
# --- Auto-generated space-group table (groups 29-35), reformatted from a
# --- whitespace-mangled section into valid one-statement-per-line Python.
# Tail of space group 29 ('P c a 21'): rot and trans_num for this operator
# were assigned on an earlier line; finish that operator verbatim.
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))

def _append_ops(transformations, ops):
    # Append one (rotation, translation-numerator, translation-denominator)
    # triple per entry.  Every rotation part in this section is a diagonal
    # matrix with entries +/-1, so each op is encoded as (diagonal, num, den).
    # (Re)defined locally so this reformatted section is self-contained.
    for diag, num, den in ops:
        rot = N.array([diag[0], 0, 0, 0, diag[1], 0, 0, 0, diag[2]])
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))

# Remaining operators of space group 29 (P c a 21).
_append_ops(transformations, [
    ((-1,-1,1), [0,0,1], [1,1,2]),
    ((-1,1,1),  [1,0,1], [2,1,2]),
    ((1,-1,1),  [1,0,0], [2,1,1]),
])
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg

# Space group 30 (P n c 2).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,0], [1,1,1]),
    ((-1,1,1),  [0,1,1], [1,2,2]),
    ((1,-1,1),  [0,1,1], [1,2,2]),
])
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg

# Space group 31 (P m n 21).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [1,0,1], [2,1,2]),
    ((-1,1,1),  [0,0,0], [1,1,1]),
    ((1,-1,1),  [1,0,1], [2,1,2]),
])
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg

# Space group 32 (P b a 2).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,0], [1,1,1]),
    ((-1,1,1),  [1,1,0], [2,2,1]),
    ((1,-1,1),  [1,1,0], [2,2,1]),
])
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg

# Space group 33 (P n a 21).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,1], [1,1,2]),
    ((-1,1,1),  [1,1,1], [2,2,2]),
    ((1,-1,1),  [1,1,0], [2,2,1]),
])
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg

# Space group 34 (P n n 2).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,0], [1,1,1]),
    ((-1,1,1),  [1,1,1], [2,2,2]),
    ((1,-1,1),  [1,1,1], [2,2,2]),
])
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg

# Space group 35 (C m m 2): four primitive operators plus the same four
# shifted by the C-centering translation (1/2, 1/2, 0).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,0], [1,1,1]),
    ((-1,1,1),  [0,0,0], [1,1,1]),
    ((1,-1,1),  [0,0,0], [1,1,1]),
    ((1,1,1),   [1,1,0], [2,2,1]),
    ((-1,-1,1), [1,1,0], [2,2,1]),
    ((-1,1,1),  [1,1,0], [2,2,1]),
    ((1,-1,1),  [1,1,0], [2,2,1]),
])
# The space_groups[] assignments for group 35 follow on the next line.
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(38, 'A m m 2', transformations) space_groups[38] = sg space_groups['A m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(39, 'A b m 2', transformations) space_groups[39] = sg space_groups['A b m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(40, 'A m a 2', transformations) space_groups[40] = sg space_groups['A m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(41, 'A b a 2', transformations) space_groups[41] = sg space_groups['A b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
# --- Auto-generated space-group table (groups 42-45), reformatted from a
# --- whitespace-mangled section into valid one-statement-per-line Python.
# Finish the eighth operator of space group 42, whose rot/num/den were
# bound on the previous original line.
transformations.append((rot, trans_num, trans_den))

def _append_ops(transformations, ops):
    # Append one (rotation, translation-numerator, translation-denominator)
    # triple per entry.  Every rotation part in this section is a diagonal
    # matrix with entries +/-1, so each op is encoded as (diagonal, num, den).
    # (Re)defined locally so this reformatted section is self-contained.
    for diag, num, den in ops:
        rot = N.array([diag[0], 0, 0, 0, diag[1], 0, 0, 0, diag[2]])
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))

# Operators 9-16 of space group 42 (F m m 2): the F-centering copies with
# translations (1/2,0,1/2) and (1/2,1/2,0).
_append_ops(transformations, [
    ((1,1,1),   [1,0,1], [2,1,2]),
    ((-1,-1,1), [1,0,1], [2,1,2]),
    ((-1,1,1),  [1,0,1], [2,1,2]),
    ((1,-1,1),  [1,0,1], [2,1,2]),
    ((1,1,1),   [1,1,0], [2,2,1]),
    ((-1,-1,1), [1,1,0], [2,2,1]),
    ((-1,1,1),  [1,1,0], [2,2,1]),
    ((1,-1,1),  [1,1,0], [2,2,1]),
])
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg

# Space group 43 (F d d 2).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,0], [1,1,1]),
    ((-1,1,1),  [1,1,1], [4,4,4]),
    ((1,-1,1),  [1,1,1], [4,4,4]),
    ((1,1,1),   [0,1,1], [1,2,2]),
    ((-1,-1,1), [0,1,1], [1,2,2]),
    ((-1,1,1),  [1,3,3], [4,4,4]),
    ((1,-1,1),  [1,3,3], [4,4,4]),
    ((1,1,1),   [1,0,1], [2,1,2]),
    ((-1,-1,1), [1,0,1], [2,1,2]),
    ((-1,1,1),  [3,1,3], [4,4,4]),
    ((1,-1,1),  [3,1,3], [4,4,4]),
    ((1,1,1),   [1,1,0], [2,2,1]),
    ((-1,-1,1), [1,1,0], [2,2,1]),
    ((-1,1,1),  [3,3,1], [4,4,4]),
    ((1,-1,1),  [3,3,1], [4,4,4]),
])
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg

# Space group 44 (I m m 2): four primitive operators plus the same four
# shifted by the I-centering translation (1/2, 1/2, 1/2).
transformations = []
_append_ops(transformations, [
    ((1,1,1),   [0,0,0], [1,1,1]),
    ((-1,-1,1), [0,0,0], [1,1,1]),
    ((-1,1,1),  [0,0,0], [1,1,1]),
    ((1,-1,1),  [0,0,0], [1,1,1]),
    ((1,1,1),   [1,1,1], [2,2,2]),
    ((-1,-1,1), [1,1,1], [2,2,2]),
    ((-1,1,1),  [1,1,1], [2,2,2]),
    ((1,-1,1),  [1,1,1], [2,2,2]),
])
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg

# Space group 45 (I b a 2) -- first operator; the second operator's
# append() call is on the next original line, so only its components are
# bound here.
transformations = []
_append_ops(transformations, [
    ((1,1,1), [0,0,0], [1,1,1]),
])
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(45, 'I b a 2', transformations) space_groups[45] = sg space_groups['I b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(46, 'I m a 2', transformations) space_groups[46] = sg space_groups['I m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(47, 'P m m m', transformations) space_groups[47] 
= sg space_groups['P m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(48, 'P n n n :2', transformations) space_groups[48] = sg space_groups['P n n n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(49, 'P c c m', transformations) space_groups[49] = sg space_groups['P c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(50, 'P b a n :2', transformations) space_groups[50] = sg space_groups['P b a n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(51, 'P m m a', transformations) space_groups[51] = sg space_groups['P m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(52, 'P n n a', transformations) space_groups[52] = sg space_groups['P n n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) 
# --- Auto-generated space-group table (groups 53-57), reformatted from a
# --- whitespace-mangled section into valid one-statement-per-line Python.
def _append_ops(transformations, ops):
    # Append one (rotation, translation-numerator, translation-denominator)
    # triple per entry.  Every rotation part in this section is a diagonal
    # matrix with entries +/-1, so each op is encoded as (diagonal, num, den).
    # (Re)defined locally so this reformatted section is self-contained.
    for diag, num, den in ops:
        rot = N.array([diag[0], 0, 0, 0, diag[1], 0, 0, 0, diag[2]])
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))

# Operators 5-8 of space group 53 (P m n a); the first four were appended
# on the previous original line.
_append_ops(transformations, [
    ((-1,-1,-1), [0,0,0],   [1,1,1]),
    ((-1,1,1),   [0,0,0],   [1,1,1]),
    ((1,-1,1),   [-1,0,-1], [2,1,2]),
    ((1,1,-1),   [-1,0,-1], [2,1,2]),
])
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg

# Space group 54 (P c c a).
transformations = []
_append_ops(transformations, [
    ((1,1,1),    [0,0,0],   [1,1,1]),
    ((1,-1,-1),  [1,0,1],   [2,1,2]),
    ((-1,1,-1),  [0,0,1],   [1,1,2]),
    ((-1,-1,1),  [1,0,0],   [2,1,1]),
    ((-1,-1,-1), [0,0,0],   [1,1,1]),
    ((-1,1,1),   [-1,0,-1], [2,1,2]),
    ((1,-1,1),   [0,0,-1],  [1,1,2]),
    ((1,1,-1),   [-1,0,0],  [2,1,1]),
])
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg

# Space group 55 (P b a m).
transformations = []
_append_ops(transformations, [
    ((1,1,1),    [0,0,0],   [1,1,1]),
    ((1,-1,-1),  [1,1,0],   [2,2,1]),
    ((-1,1,-1),  [1,1,0],   [2,2,1]),
    ((-1,-1,1),  [0,0,0],   [1,1,1]),
    ((-1,-1,-1), [0,0,0],   [1,1,1]),
    ((-1,1,1),   [-1,-1,0], [2,2,1]),
    ((1,-1,1),   [-1,-1,0], [2,2,1]),
    ((1,1,-1),   [0,0,0],   [1,1,1]),
])
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg

# Space group 56 (P c c n).
transformations = []
_append_ops(transformations, [
    ((1,1,1),    [0,0,0],   [1,1,1]),
    ((1,-1,-1),  [1,0,1],   [2,1,2]),
    ((-1,1,-1),  [0,1,1],   [1,2,2]),
    ((-1,-1,1),  [1,1,0],   [2,2,1]),
    ((-1,-1,-1), [0,0,0],   [1,1,1]),
    ((-1,1,1),   [-1,0,-1], [2,1,2]),
    ((1,-1,1),   [0,-1,-1], [1,2,2]),
    ((1,1,-1),   [-1,-1,0], [2,2,1]),
])
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg

# Space group 57 (P b c m) -- first five operators.  The sixth operator is
# cut by the chunk boundary: only its rotation is bound here, and its
# shape/translation are completed on the following original line.
transformations = []
_append_ops(transformations, [
    ((1,1,1),    [0,0,0], [1,1,1]),
    ((1,-1,-1),  [0,1,0], [1,2,1]),
    ((-1,1,-1),  [0,1,1], [1,2,2]),
    ((-1,-1,1),  [0,0,1], [1,1,2]),
    ((-1,-1,-1), [0,0,0], [1,1,1]),
])
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(57, 'P b c m', transformations) space_groups[57] = sg space_groups['P b c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(58, 'P n n m', transformations) space_groups[58] = sg space_groups['P 
n n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(59, 'P m m n :2', transformations) space_groups[59] = sg space_groups['P m m n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(60, 'P b c n', transformations) space_groups[60] = sg space_groups['P b c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(61, 'P b c a', transformations) space_groups[61] = sg space_groups['P b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(62, 'P n m a', transformations) space_groups[62] = sg space_groups['P n m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(63, 'C m c m', transformations) space_groups[63] = sg space_groups['C m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num =
N.array([1,0,1])
numpy.array
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import inspect import unicodedata import warnings import pandas as pd import numpy as np import holidays as hdays_part1 import fbprophet.hdays as hdays_part2 from fbprophet.make_holidays import make_holidays_df def utf8_to_ascii(text): """Holidays often have utf-8 characters. These are not allowed in R package data (they generate a NOTE). TODO: revisit whether we want to do this lossy conversion. """ ascii_text = ( unicodedata.normalize('NFD', text) .encode('ascii', 'ignore') .decode('ascii') .strip() ) # Check if anything converted if sum(1 for x in ascii_text if x not in [' ', '(', ')', ',']) == 0: return 'FAILED_TO_PARSE' else: return ascii_text def generate_holidays_file(): """Generate csv file of all possible holiday names, ds, and countries, year combination """ year_list =
np.arange(1995, 2045, 1)
numpy.arange
import numpy as np def affine_forward(x, w, b): """ Computes the forward pass for an affine (fully-connected) layer. The input x has shape (N, d_1, ..., d_k) where x[i] is the ith input. We multiply this against a weight matrix of shape (D, M) where D = \prod_i d_i Inputs: x - Input data, of shape (N, d_1, ..., d_k) w - Weights, of shape (D, M) b - Biases, of shape (M,) Returns a tuple of: - out: output, of shape (N, M) - cache: (x, w, b) """ out = None ############################################################################# # TODO: Implement the affine forward pass. Store the result in out. You # # will need to reshape the input into rows. # ############################################################################# x_vec = np.reshape(x, (x.shape[0], np.prod(x.shape[1:len(x.shape)]))) out =
np.matmul(x_vec, w)
numpy.matmul
import os import argparse import warnings warnings.filterwarnings('ignore') import time from datetime import datetime import random import numpy as np np.set_printoptions(precision=4, edgeitems=6, linewidth=100, suppress=True) from utils.misc.hyper_params import default_hps from utils.misc.data_handler import DataHandler from utils.misc.data_handler import load_raw_data_list, encode_batch, next_batch from models.missVAE import missVAE def model_est_series(hps, model, N_batches, obs_dataset, action_dataset, reward_dataset, domain_dataset, save_p): logmix_dataset = [] mu_dataset = [] logvar_dataset = [] domain_dataset2 = [] dataset_range = np.arange(N_batches * hps.batch_size) start_time = time.time() for i in range(N_batches): batch_obs, batch_action, batch_reward, batch_domain = next_batch(hps, obs_dataset, action_dataset, reward_dataset, domain_dataset, i, dataset_range) action_init = np.zeros((hps.batch_size, 1, hps.action_size)) batch_action_prev = np.concatenate((action_init, batch_action[:, :-1, :]), axis=1) reward_init = np.zeros((hps.batch_size, 1, hps.reward_size)) batch_reward_prev = np.concatenate((reward_init, batch_reward[:, :-1, :]), axis=1) batch_logmix, batch_mu, batch_logvar = \ encode_batch(hps, model, batch_obs, batch_action_prev, batch_reward_prev, batch_domain, hps.max_seq_len) logmix_dataset.append(batch_logmix.astype(np.float16)) mu_dataset.append(batch_mu.astype(np.float16)) logvar_dataset.append(batch_logvar.astype(np.float16)) domain_dataset2.append(batch_domain.astype(np.int32)) # N_batches x batch_size logmix_dataset = np.reshape(np.array(logmix_dataset), (-1, hps.max_seq_len, hps.z_size, hps.num_mixture)) mu_dataset = np.reshape(np.array(mu_dataset), (-1, hps.max_seq_len, hps.z_size, hps.num_mixture)) logvar_dataset = np.reshape(np.array(logvar_dataset), (-1, hps.max_seq_len, hps.z_size, hps.num_mixture)) domain_dataset2 = np.reshape(np.array(domain_dataset2), (-1)) # (N_batches x batch_size) # extend it domain_dataset3 = [] for i in 
range(N_batches * hps.batch_size): d0 = np.stack([domain_dataset2[i]] * (hps.max_seq_len - 1)) domain_dataset3.append(d0) domain_dataset3 = np.reshape(domain_dataset3, [-1]) at = np.reshape(action_dataset[:, :-1, :], (-1, hps.action_size)) st_logmix = np.reshape(logmix_dataset[:, :-1, :, :], (-1, hps.z_size, hps.num_mixture)) st_mu = np.reshape(mu_dataset[:, :-1, :, :], (-1, hps.z_size, hps.num_mixture)) st_logvar = np.reshape(logvar_dataset[:, :-1, :, :], (-1, hps.z_size, hps.num_mixture)) st1_logmix = np.reshape(logmix_dataset[:, 1:, :, :], (-1, hps.z_size, hps.num_mixture)) st1_mu = np.reshape(mu_dataset[:, 1:, :, :], (-1, hps.z_size, hps.num_mixture)) st1_logvar = np.reshape(logvar_dataset[:, 1:, :, :], (-1, hps.z_size, hps.num_mixture)) np.savez_compressed(os.path.join(save_p, 'series.npz'), action=at, domain=domain_dataset3, st_logmix=st_logmix, st_mu=st_mu, st_logvar=st_logvar, st1_logmix=st1_logmix, st1_mu=st1_mu, st1_logvar=st1_logvar) time_taken = time.time() - start_time print("time taken on series: %.4f" % time_taken) parser = argparse.ArgumentParser() parser.add_argument('-name', type=str, required=True, help='data path') parser.add_argument('-source', type=str, required=True, help='data path') parser.add_argument('-dest', type=str, required=True, help='data path') parser.add_argument('-domain', type=str, nargs='+', required=True, help='full domain index') args = parser.parse_args() game = args.name source_p = args.source dest_p = args.dest src_domain_index = args.domain # Parameters for training NUM_EPOCH = 1000 date_format = '%A_%d_%B_%Y_%Hh_%Mm_%Ss' time_now = datetime.now().strftime(date_format) data_indicator = args.source.split('/', 2)[-1] if not os.path.exists(dest_p): os.makedirs(dest_p) # there 4 steps for model estimation, # i.e. 
estimate the VAE components, the series components, the dynamic components, and all components together N_datas = [10000 * 5, None, 10000 * 5, 10000 * 5] model_est_steps = ['vae', 'series', 'dynamic', 'all'] for m_step in range(4): model_save_path = './results/' + data_indicator + '/' + model_est_steps[m_step] + '/' + time_now if not os.path.exists(model_save_path): os.makedirs(model_save_path) if m_step != 1: model_save_epochs_path = './results/' + data_indicator + '/' + model_est_steps[ m_step] + '/' + time_now + '/epochs' if not os.path.exists(model_save_epochs_path): os.makedirs(model_save_epochs_path) if m_step == 0: structure_save_steps_path = './results/' + data_indicator + '/' + model_est_steps[m_step] + '/' + time_now + \ '/structure_steps' if not os.path.exists(structure_save_steps_path): os.makedirs(structure_save_steps_path) hps = default_hps(game, m_step) N_data = N_datas[m_step] if N_data == None: filelist = os.listdir(dest_p) random.shuffle(filelist) filelist.sort() obs_dataset, action_dataset, reward_dataset, domain_dataset = load_raw_data_list(dest_p, filelist) N_data = len(obs_dataset) N_batches = int(np.floor(N_data / hps.batch_size)) mvae = missVAE(hps, m_step, model_est_steps[m_step]) if m_step == 3: vae_p = './results/' + data_indicator + '/' + model_est_steps[0] + '/' + time_now + '/vae.json' mvae.load_json(os.path.join(vae_p)) dyn_p = './results/' + data_indicator + '/' + model_est_steps[2] + '/' + time_now + '/dynamic.json' mvae.load_json(os.path.join(dyn_p), is_dyn=True) if m_step == 1: vae_p = './results/' + data_indicator + '/' + model_est_steps[0] + '/' + time_now + '/vae.json' mvae.load_json(os.path.join(vae_p)) model_est_series(hps, mvae, N_batches, obs_dataset, action_dataset, reward_dataset, domain_dataset, model_save_path) else: print('sodifjsadfa') if m_step == 2: dest_p = './results/' + data_indicator + '/' + model_est_steps[1] + '/' + time_now dh = DataHandler(hps, m_step, source_p, dest_p, src_domain_index, N_data) 
curr_learning_rate = 1 sign = 1 # 0: not adding random noise to gradient; 1: add ##################################### Training ##################################### for epoch in range(NUM_EPOCH): for idx in range(N_batches): step = mvae.sess.run(mvae.global_step) curr_learning_rate = \ (hps.learning_rate - hps.min_learning_rate) * hps.decay_rate ** step + hps.min_learning_rate if m_step == 0 or m_step == 3: # note that we also should get domain index batch_obs, batch_action, batch_reward, batch_domain_index = dh.next_batch() action_init =
np.zeros((hps.batch_size, 1, hps.action_size))
numpy.zeros
""" @Author: <NAME> @Filename: dataset.py @Contact: <EMAIL> @Time: 2021/12/28 9:24 @Discription: dataset """ import torch import numpy as np from os.path import join from random import randint, normalvariate from torch.utils.data import Dataset, DataLoader import AFLink.config as cfg SEQ = { 'train': [ 'MOT17-02-FRCNN', 'MOT17-04-FRCNN', 'MOT17-05-FRCNN', 'MOT17-09-FRCNN', 'MOT17-10-FRCNN', 'MOT17-11-FRCNN', 'MOT17-13-FRCNN' ], 'test': [ 'MOT17-01-FRCNN', 'MOT17-03-FRCNN', 'MOT17-06-FRCNN', 'MOT17-07-FRCNN', 'MOT17-08-FRCNN', 'MOT17-12-FRCNN', 'MOT17-14-FRCNN' ] } class LinkData(Dataset): def __init__(self, root, mode='train', minLen=cfg.model_minLen, inputLen=cfg.model_inputLen): """ :param minLen: 仅考虑长度超过该阈值的GT轨迹 :param inputLen: 网络输入轨迹长度 """ self.minLen = minLen self.inputLen = inputLen if root: assert mode in ('train', 'val') self.root = root self.mode = mode self.id2info = self.initialize() self.ids = list(self.id2info.keys()) def initialize(self): id2info = dict() for seqid, seq in enumerate(SEQ['train'], start=1): path_gt = join(self.root, '{}/gt/gt_{}_half.txt'.format(seq, self.mode)) gts =
np.loadtxt(path_gt, delimiter=',')
numpy.loadtxt
#!/usr/bin/env python # Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import plotting as plg import os from multiprocessing import Pool, Lock import pickle import warnings import numpy as np import pandas as pd from batchgenerators.transforms.abstract_transforms import AbstractTransform from scipy.ndimage.measurements import label as lb from torch.utils.data import Dataset as torchDataset from batchgenerators.dataloading.data_loader import SlimDataLoaderBase import utils.exp_utils as utils import data_manager as dmanager for msg in ["This figure includes Axes that are not compatible with tight_layout", "Data has no positive values, and therefore cannot be log-scaled."]: warnings.filterwarnings("ignore", msg) class AttributeDict(dict): __getattr__ = dict.__getitem__ __setattr__ = dict.__setitem__ ################################## # data loading, organisation # ################################## class fold_generator: """ generates splits of indices for a given length of a dataset to perform n-fold cross-validation. splits each fold into 3 subsets for training, validation and testing. This form of cross validation uses an inner loop test set, which is useful if test scores shall be reported on a statistically reliable amount of patients, despite limited size of a dataset. 
If hold out test set is provided and hence no inner loop test set needed, just add test_idxs to the training data in the dataloader. This creates straight-forward train-val splits. :returns names list: list of len n_splits. each element is a list of len 3 for train_ix, val_ix, test_ix. """ def __init__(self, seed, n_splits, len_data): """ :param seed: Random seed for splits. :param n_splits: number of splits, e.g. 5 splits for 5-fold cross-validation :param len_data: number of elements in the dataset. """ self.tr_ix = [] self.val_ix = [] self.te_ix = [] self.slicer = None self.missing = 0 self.fold = 0 self.len_data = len_data self.n_splits = n_splits self.myseed = seed self.boost_val = 0 def init_indices(self): t = list(np.arange(self.l)) # round up to next splittable data amount. split_length = int(np.ceil(len(t) / float(self.n_splits))) self.slicer = split_length self.mod = len(t) % self.n_splits if self.mod > 0: # missing is the number of folds, in which the new splits are reduced to account for missing data. self.missing = self.n_splits - self.mod self.te_ix = t[:self.slicer] self.tr_ix = t[self.slicer:] self.val_ix = self.tr_ix[:self.slicer] self.tr_ix = self.tr_ix[self.slicer:] def new_fold(self): slicer = self.slicer if self.fold < self.missing : slicer = self.slicer - 1 temp = self.te_ix # catch exception mod == 1: test set collects 1+ data since walk through both roudned up splits. # account for by reducing last fold split by 1. 
if self.fold == self.n_splits-2 and self.mod ==1: temp += self.val_ix[-1:] self.val_ix = self.val_ix[:-1] self.te_ix = self.val_ix self.val_ix = self.tr_ix[:slicer] self.tr_ix = self.tr_ix[slicer:] + temp def get_fold_names(self): names_list = [] rgen = np.random.RandomState(self.myseed) cv_names = np.arange(self.len_data) rgen.shuffle(cv_names) self.l = len(cv_names) self.init_indices() for split in range(self.n_splits): train_names, val_names, test_names = cv_names[self.tr_ix], cv_names[self.val_ix], cv_names[self.te_ix] names_list.append([train_names, val_names, test_names, self.fold]) self.new_fold() self.fold += 1 return names_list class FoldGenerator(): r"""takes a set of elements (identifiers) and randomly splits them into the specified amt of subsets. """ def __init__(self, identifiers, seed, n_splits=5): self.ids = np.array(identifiers) self.n_splits = n_splits self.seed = seed def generate_splits(self, n_splits=None): if n_splits is None: n_splits = self.n_splits rgen = np.random.RandomState(self.seed) rgen.shuffle(self.ids) self.splits = list(np.array_split(self.ids, n_splits, axis=0)) # already returns list, but to be sure return self.splits class Dataset(torchDataset): r"""Parent Class for actual Dataset classes to inherit from! 
""" def __init__(self, cf, data_sourcedir=None): super(Dataset, self).__init__() self.cf = cf self.data_sourcedir = cf.data_sourcedir if data_sourcedir is None else data_sourcedir self.data_dir = cf.data_dir if hasattr(cf, 'data_dir') else self.data_sourcedir self.data_dest = cf.data_dest if hasattr(cf, "data_dest") else self.data_sourcedir self.data = {} self.set_ids = [] def copy_data(self, cf, file_subset, keep_packed=False, del_after_unpack=False): if os.path.normpath(self.data_sourcedir) != os.path.normpath(self.data_dest): self.data_sourcedir = os.path.join(self.data_sourcedir, '') args = AttributeDict({ "source" : self.data_sourcedir, "destination" : self.data_dest, "recursive" : True, "cp_only_npz" : False, "keep_packed" : keep_packed, "del_after_unpack" : del_after_unpack, "threads" : 16 if self.cf.server_env else os.cpu_count() }) dmanager.copy(args, file_subset=file_subset) self.data_dir = self.data_dest def __len__(self): return len(self.data) def __getitem__(self, id): """Return a sample of the dataset, i.e.,the dict of the id """ return self.data[id] def __iter__(self): return self.data.__iter__() def init_FoldGenerator(self, seed, n_splits): self.fg = FoldGenerator(self.set_ids, seed=seed, n_splits=n_splits) def generate_splits(self, check_file): if not os.path.exists(check_file): self.fg.generate_splits() with open(check_file, 'wb') as handle: pickle.dump(self.fg.splits, handle) else: with open(check_file, 'rb') as handle: self.fg.splits = pickle.load(handle) def calc_statistics(self, subsets=None, plot_dir=None, overall_stats=True): if self.df is None: self.df = pd.DataFrame() balance_t = self.cf.balance_target if hasattr(self.cf, "balance_target") else "class_targets" self.df._metadata.append(balance_t) if balance_t=="class_targets": mapper = lambda cl_id: self.cf.class_id2label[cl_id] labels = self.cf.class_id2label.values() elif balance_t=="rg_bin_targets": mapper = lambda rg_bin: self.cf.bin_id2label[rg_bin] labels = 
self.cf.bin_id2label.values() # elif balance_t=="regression_targets": # # todo this wont work # mapper = lambda rg_val: AttributeDict({"name":rg_val}) #self.cf.bin_id2label[self.cf.rg_val_to_bin_id(rg_val)] # labels = self.cf.bin_id2label.values() elif balance_t=="lesion_gleasons": mapper = lambda gs: self.cf.gs2label[gs] labels = self.cf.gs2label.values() else: mapper = lambda x: AttributeDict({"name":x}) labels = None for pid, subj_data in self.data.items(): unique_ts, counts = np.unique(subj_data[balance_t], return_counts=True) self.df = self.df.append(pd.DataFrame({"pid": [pid], **{mapper(unique_ts[i]).name: [counts[i]] for i in range(len(unique_ts))}}), ignore_index=True, sort=True) self.df = self.df.fillna(0) if overall_stats: df = self.df.drop("pid", axis=1) df = df.reindex(sorted(df.columns), axis=1).astype('uint32') print("Overall dataset roi counts per target kind:"); print(df.sum()) if subsets is not None: self.df["subset"] = np.nan self.df["display_order"] = np.nan for ix, (subset, pids) in enumerate(subsets.items()): self.df.loc[self.df.pid.isin(pids), "subset"] = subset self.df.loc[self.df.pid.isin(pids), "display_order"] = ix df = self.df.groupby("subset").agg("sum").drop("pid", axis=1, errors='ignore').astype('int64') df = df.sort_values(by=['display_order']).drop('display_order', axis=1) df = df.reindex(sorted(df.columns), axis=1) print("Fold {} dataset roi counts per target kind:".format(self.cf.fold)); print(df) if plot_dir is not None: os.makedirs(plot_dir, exist_ok=True) if subsets is not None: plg.plot_fold_stats(self.cf, df, labels, os.path.join(plot_dir, "data_stats_fold_" + str(self.cf.fold))+".pdf") if overall_stats: plg.plot_data_stats(self.cf, df, labels, os.path.join(plot_dir, 'data_stats_overall.pdf')) return df, labels def get_class_balanced_patients(all_pids, class_targets, batch_size, num_classes, random_ratio=0): ''' samples towards equilibrium of classes (on basis of total RoI counts). 
for highly imbalanced dataset, this might be a too strong requirement. :param class_targets: dic holding {patient_specifier : ROI class targets}, list position of ROI target corresponds to respective seg label - 1 :param batch_size: :param num_classes: :return: ''' # assert len(all_pids)>=batch_size, "not enough eligible pids {} to form a single batch of size {}".format(len(all_pids), batch_size) class_counts = {k: 0 for k in range(1,num_classes+1)} not_picked = np.array(all_pids) batch_patients = np.empty((batch_size,), dtype=not_picked.dtype) rarest_class = np.random.randint(1,num_classes+1) for ix in range(batch_size): if len(not_picked) == 0: warnings.warn("Dataset too small to generate batch with unique samples; => recycling.") not_picked = np.array(all_pids) np.random.shuffle(not_picked) #this could actually go outside(above) the loop. pick = not_picked[0] for cand in not_picked: if np.count_nonzero(class_targets[cand] == rarest_class) > 0: pick = cand cand_rarest_class = np.argmin([np.count_nonzero(class_targets[cand] == cl) for cl in range(1,num_classes+1)])+1 # if current batch already bigger than the batch random ratio, then # check that weakest class in this patient is not the weakest in current batch (since needs to be boosted) # also that at least one roi of this patient belongs to weakest class. If True, keep patient, else keep looking. 
if (cand_rarest_class != rarest_class and np.count_nonzero(class_targets[cand] == rarest_class) > 0) \ or ix < int(batch_size * random_ratio): break for c in range(1,num_classes+1): class_counts[c] += np.count_nonzero(class_targets[pick] == c) if not ix < int(batch_size * random_ratio) and class_counts[rarest_class] == 0: # means searched thru whole set without finding rarest class print("Class {} not represented in current dataset.".format(rarest_class)) rarest_class = np.argmin(([class_counts[c] for c in range(1,num_classes+1)]))+1 batch_patients[ix] = pick not_picked = not_picked[not_picked != pick] # removes pick return batch_patients class BatchGenerator(SlimDataLoaderBase): """ create the training/validation batch generator. Randomly sample batch_size patients from the data set, (draw a random slice if 2D), pad-crop them to equal sizes and merge to an array. :param data: data dictionary as provided by 'load_dataset' :param img_modalities: list of strings ['adc', 'b1500'] from config :param batch_size: number of patients to sample for the batch :param pre_crop_size: equal size for merging the patients to a single array (before the final random-crop in data aug.) :return dictionary containing the batch data / seg / pids as lists; the augmenter will later concatenate them into an array. 
""" def __init__(self, cf, data, sample_pids_w_replace=True, max_batches=None, raise_stop_iteration=False, n_threads=None, seed=0): if n_threads is None: n_threads = cf.n_workers super(BatchGenerator, self).__init__(data, cf.batch_size, number_of_threads_in_multithreaded=n_threads) self.cf = cf self.random_count = int(cf.batch_random_ratio * cf.batch_size) self.plot_dir = os.path.join(self.cf.plot_dir, 'train_generator') os.makedirs(self.plot_dir, exist_ok=True) self.max_batches = max_batches self.raise_stop = raise_stop_iteration self.thread_id = 0 self.batches_produced = 0 self.dataset_length = len(self._data) self.dataset_pids = list(self._data.keys()) self.rgen = np.random.RandomState(seed=seed) self.eligible_pids = self.rgen.permutation(self.dataset_pids.copy()) self.eligible_pids =
np.array_split(self.eligible_pids, self.number_of_threads_in_multithreaded)
numpy.array_split
# %% [markdown] # # Harris corner detector # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/YoniChechik/AI_is_Math/blob/master/c_08_features/harris.ipynb) # %% # to run in google colab import sys if 'google.colab' in sys.modules: import subprocess subprocess.call('apt-get install subversion'.split()) subprocess.call('svn export https://github.com/YoniChechik/AI_is_Math/trunk/c_08_features/chess.jpg'.split()) # %% import cv2 import numpy as np import matplotlib.pyplot as plt # %% imgBGR = cv2.imread("chess.jpg") imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB) img = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY).astype(float)/255 plt.figure(figsize=(10, 10)) plt.imshow(imgRGB) plt.show() # %% [markdown] # ## harris- step by step # %% #derivatives in x and y dirs kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) Ix = cv2.filter2D(img, -1, kernel_x) kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) Iy = cv2.filter2D(img, -1, kernel_y) window_size = 3 offset = int(np.floor(window_size/2)) l_max = np.zeros(img.shape) l_min =
np.zeros(img.shape)
numpy.zeros
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Mar 4 10:09:21 2019 @author: nmei """ from autoreject import (AutoReject,get_rejection_threshold) import mne from glob import glob import re import os import numpy as np import pandas as pd import pickle #import faster # https://gist.github.com/wmvanvliet/d883c3fe1402c7ced6fc from sklearn.metrics import roc_auc_score,roc_curve from sklearn.metrics import ( classification_report, matthews_corrcoef, confusion_matrix, f1_score, log_loss, r2_score ) from sklearn.preprocessing import (MinMaxScaler, OneHotEncoder, FunctionTransformer, StandardScaler) from sklearn.pipeline import make_pipeline from sklearn.ensemble.forest import _generate_unsampled_indices from sklearn.utils import shuffle from sklearn.svm import SVC,LinearSVC from sklearn.calibration import CalibratedClassifierCV from sklearn.decomposition import PCA from sklearn.dummy import DummyClassifier from sklearn.feature_selection import (SelectFromModel, SelectPercentile, VarianceThreshold, mutual_info_classif, f_classif, chi2, f_regression, GenericUnivariateSelect) from sklearn.model_selection import (StratifiedShuffleSplit, cross_val_score) from sklearn.ensemble import RandomForestClassifier,BaggingClassifier,VotingClassifier from sklearn.neural_network import MLPClassifier from xgboost import XGBClassifier from itertools import product,combinations from sklearn.base import clone from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from collections import OrderedDict from scipy import stats from collections import Counter from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib import pyplot as plt from matplotlib.pyplot import cm from nilearn.plotting.img_plotting import (_load_anat, _utils, _plot_img_with_bg, _get_colorbar_and_data_ranges, _safe_get_data) import matplotlib.patches as patches try: #from mvpa2.datasets.base import Dataset from mvpa2.mappers.fx import mean_group_sample 
#from mvpa2.measures import rsa #from mvpa2.measures.searchlight import sphere_searchlight #from mvpa2.base.learner import ChainLearner #from mvpa2.mappers.shape import TransposeMapper #from mvpa2.generators.partition import NFoldPartitioner except: pass#print('pymvpa is not installed') try: # from tqdm import tqdm_notebook as tqdm from tqdm.auto import tqdm except: print('why is tqdm not installed?') def preprocessing_conscious(raw, events, session, tmin = -0, tmax = 1, notch_filter = 50, event_id = {'living':1,'nonliving':2}, baseline = (None,None), perform_ICA = False, lowpass = None, interpolate_bad_channels = True,): """ 0. re-reference - explicitly """ raw_ref ,_ = mne.set_eeg_reference(raw, ref_channels = 'average', projection = True,) raw_ref.apply_proj() # it might tell you it already has been re-referenced, but do it anyway # everytime before filtering, explicitly pick the type of channels you want # to perform the filters picks = mne.pick_types(raw_ref.info, meg = False, # No MEG eeg = True, # YES EEG eog = perform_ICA, # depends on ICA ) # regardless the bandpass filtering later, we should always filter # for wire artifacts and their oscillations raw_ref.notch_filter(np.arange(notch_filter,241,notch_filter), picks = picks) if lowpass is not None: raw_ref.filter(None,lowpass,) epochs = mne.Epochs(raw_ref, events, # numpy array event_id, # dictionary tmin = tmin, tmax = tmax, baseline = baseline, # range of time for computing the mean references for each channel and subtract these values from all the time points per channel picks = picks, detrend = 1, # detrend preload = True # must be true if we want to do further processing ) """ 1. 
if necessary, perform ICA """ if perform_ICA: picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) if interpolate_bad_channels: interpolation_list = faster_bad_channels(epochs,picks=picks) for ch_name in interpolation_list: epochs.info['bads'].append(ch_name) epochs = epochs.interpolate_bads() # ar = AutoReject( # picks = picks, # random_state = 12345, # ) # ar.fit(epochs) # _,reject_log = ar.transform(epochs,return_log=True) # calculate the noise covariance of the epochs noise_cov = mne.compute_covariance(epochs,#[~reject_log.bad_epochs], tmin = baseline[0], tmax = baseline[1], method = 'empirical', rank = None,) # define an ica function ica = mne.preprocessing.ICA(n_components = .99, n_pca_components = .99, max_pca_components = None, method = 'infomax', max_iter = int(3e3), noise_cov = noise_cov, random_state = 12345,) picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) ica.fit(epochs,#[~reject_log.bad_epochs], picks = picks, start = tmin, stop = tmax, decim = 3, tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw. 
) # search for artificial ICAs automatically # most of these hyperparameters were used in a unrelated published study ica.detect_artifacts(epochs,#[~reject_log.bad_epochs], eog_ch = ['FT9','FT10','TP9','TP10'], eog_criterion = 0.4, # arbitary choice skew_criterion = 1, # arbitary choice kurt_criterion = 1, # arbitary choice var_criterion = 1, # arbitary choice ) picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs], exclude = ica.exclude, ) epochs = epochs_ica.copy() else: picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) if interpolate_bad_channels: interpolation_list = faster_bad_channels(epochs,picks=picks) for ch_name in interpolation_list: epochs.info['bads'].append(ch_name) epochs = epochs.interpolate_bads() # pick the EEG channels for later use clean_epochs = epochs.pick_types(eeg = True, eog = False) return clean_epochs def preprocessing_unconscious(raw, events, session, tmin = -0, tmax = 1, notch_filter = 50, event_id = {'living':1,'nonliving':2}, baseline = (None,None), perform_ICA = False, eog_chs = [], ecg_chs = [],): # everytime before filtering, explicitly pick the type of channels you want # to perform the filters picks = mne.pick_types(raw.info, meg = True, # No MEG eeg = False, # NO EEG eog = True, # YES EOG ecg = True, # YES ECG ) # regardless the bandpass filtering later, we should always filter # for wire artifacts and their oscillations if type(notch_filter) is list: for item in notch_filter: raw.notch_filter(np.arange(item,301,item), picks = picks) else: raw.notch_filter(np.arange(notch_filter,301,notch_filter), picks = picks) # filter EOG and ECG channels picks = mne.pick_types(raw.info, meg = False, eeg = False, eog = True, ecg = True,) raw.filter(1,12,picks = picks,) # epoch the data picks = mne.pick_types(raw.info, meg = True, eog = True, ecg = True, ) epochs = mne.Epochs(raw, events, # numpy array event_id, # dictionary tmin 
= tmin, tmax = tmax, baseline = baseline, # range of time for computing the mean references for each channel and subtract these values from all the time points per channel picks = picks, detrend = 1, # detrend preload = True # must be true if we want to do further processing ) """ 1. if necessary, perform ICA """ if perform_ICA: picks = mne.pick_types(epochs.info, meg = True, # YES MEG eeg = False, # NO EEG eog = False, # NO EOG ecg = False, # NO ECG ) # ar = AutoReject( # picks = picks, # random_state = 12345, # ) # ar.fit(epochs) # _,reject_log = ar.transform(epochs,return_log=True) # calculate the noise covariance of the epochs noise_cov = mne.compute_covariance(epochs,#[~reject_log.bad_epochs], tmin = tmin, tmax = 0, method = 'empirical', rank = None,) # define an ica function ica = mne.preprocessing.ICA(n_components = .99, n_pca_components = .99, max_pca_components = None, method = 'extended-infomax', max_iter = int(3e3), noise_cov = noise_cov, random_state = 12345,) picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) ica.fit(epochs,#[~reject_log.bad_epochs], picks = picks, start = tmin, stop = tmax, decim = 3, tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw. 
) # search for artificial ICAs automatically # most of these hyperparameters were used in a unrelated published study ica.detect_artifacts(epochs,#[~reject_log.bad_epochs], eog_ch = eog_chs, ecg_ch = ecg_chs[0], eog_criterion = 0.4, # arbitary choice ecg_criterion = 0.1, # arbitary choice skew_criterion = 1, # arbitary choice kurt_criterion = 1, # arbitary choice var_criterion = 1, # arbitary choice ) epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs], exclude = ica.exclude, ) epochs = epochs_ica.copy() # pick the EEG channels for later use clean_epochs = epochs.pick_types(meg = True, eeg = True, eog = False) return clean_epochs def _preprocessing_conscious( raw,events,session, n_interpolates = np.arange(1,32,4), consensus_pers = np.linspace(0,1.0,11), event_id = {'living':1,'nonliving':2}, tmin = -0.15, tmax = 0.15 * 6, high_pass = 0.001, low_pass = 30, notch_filter = 50, fix = False, ICA = False, logging = None, filtering = False,): """ Preprocessing pipeline for conscious trials Inputs ------------------- raw: MNE Raw object, contineous EEG raw data events: Numpy array with 3 columns, where the first column indicates time and the last column indicates event code n_interpolates: list of values 1 <= N <= max number of channels consensus_pers: ?? autoreject hyperparameter search grid event_id: MNE argument, to control for epochs tmin: first time stamp of the epoch tmax: last time stamp of the epoch high_pass: low cutoff of the bandpass filter low_pass: high cutoff of the bandpass filter notch_filter: frequency of the notch filter, 60 in US and 50 in Europe fix : when "True", apply autoReject algorithm to remove artifacts that was not identifed in the ICA procedure Output ICA : when "True", apply ICA artifact correction in ICA space logging: when not "None", output some log files for us to track the process ------------------- Epochs: MNE Epochs object, segmented and cleaned EEG data (n_trials x n_channels x n_times) """ """ 0. 
re-reference - explicitly """ raw_ref ,_ = mne.set_eeg_reference(raw, ref_channels = 'average', projection = True,) raw_ref.apply_proj() # it might tell you it already has been re-referenced, but do it anyway """ 1. highpass filter by a 4th order zero-phase Butterworth filter """ # everytime before filtering, explicitly pick the type of channels you want # to perform the filters picks = mne.pick_types(raw_ref.info, meg = False, # No MEG eeg = True, # YES EEG eog = True, # YES EOG ) # regardless the bandpass filtering later, we should always filter # for wire artifacts and their oscillations raw_ref.notch_filter(np.arange(notch_filter,241,notch_filter), picks = picks) # high pass filtering picks = mne.pick_types(raw_ref.info, meg = False, # No MEG eeg = True, # YES EEG eog = False, # No EOG ) if filtering: raw_ref.filter(high_pass, None, picks = picks, filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”) l_trans_bandwidth= high_pass, method = 'fir', # overlap-add FIR filtering phase = 'zero', # the delay of this filter is compensated for fir_window = 'hamming', # The window to use in FIR design fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2” ) """ 2. epoch the data """ picks = mne.pick_types(raw_ref.info, eeg = True, # YES EEG eog = True, # YES EOG ) epochs = mne.Epochs(raw_ref, events, # numpy array event_id, # dictionary tmin = tmin, tmax = tmax, baseline = (tmin,- (1 / 60 * 20)), # range of time for computing the mean references for each channel and subtract these values from all the time points per channel picks = picks, detrend = 1, # linear detrend preload = True # must be true if we want to do further processing ) """ 4. ica on epoch data """ if ICA: """ 3. 
apply autoreject """ picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) ar = AutoReject( # n_interpolate = n_interpolates, # consensus = consensus_pers, # thresh_method = 'bayesian_optimization', picks = picks, random_state = 12345, # n_jobs = 1, # verbose = 'progressbar', ) ar.fit(epochs) _,reject_log = ar.transform(epochs,return_log=True) if logging is not None: fig = plot_EEG_autoreject_log(ar) fig.savefig(logging,bbox_inches = 'tight') for key in epochs.event_id.keys(): evoked = epochs[key].average() fig_ = evoked.plot_joint(title = key) fig_.savefig(logging.replace('.png',f'_{key}_pre.png'), bbox_inches = 'tight') plt.close('all') # calculate the noise covariance of the epochs noise_cov = mne.compute_covariance(epochs[~reject_log.bad_epochs], tmin = tmin, tmax = tmax, method = 'empirical', rank = None,) # define an ica function ica = mne.preprocessing.ICA(n_components = .99, n_pca_components = .99, max_pca_components = None, method = 'extended-infomax', max_iter = int(3e3), noise_cov = noise_cov, random_state = 12345,) # # search for a global rejection threshold globally # reject = get_rejection_threshold(epochs[~reject_log.bad_epochs], # decim = 1, # random_state = 12345) picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) ica.fit(epochs[~reject_log.bad_epochs], picks = picks, start = tmin, stop = tmax, # reject = reject, # if some data in a window has values that exceed the rejection threshold, this window will be ignored when computing the ICA decim = 3, tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw. 
) # search for artificial ICAs automatically # most of these hyperparameters were used in a unrelated published study ica.detect_artifacts(epochs[~reject_log.bad_epochs], eog_ch = ['FT9','FT10','TP9','TP10'], eog_criterion = 0.4, # arbitary choice skew_criterion = 2, # arbitary choice kurt_criterion = 2, # arbitary choice var_criterion = 2, # arbitary choice ) # # explicitly search for eog ICAs # eog_idx,scores = ica.find_bads_eog(raw_ref, # start = tmin, # stop = tmax, # l_freq = 2, # h_freq = 10, # ) # ica.exclude += eog_idx picks = mne.pick_types(epochs.info, eeg = True, # YES EEG eog = False # NO EOG ) epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs], exclude = ica.exclude, ) else: picks = mne.pick_types(epochs.info, eeg = True, eog = False,) # epochs.filter(None, # low_pass, # picks = picks, # filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”) # method = 'fir', # overlap-add FIR filtering # phase = 'zero', # the delay of this filter is compensated for # fir_window = 'hamming', # The window to use in FIR design # fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2” # ) if logging is not None: for key in epochs.event_id.keys(): evoked = epochs[key].average() fig_ = evoked.plot_joint(title = key) fig_.savefig(logging.replace('.png',f'_{key}_post.png'), bbox_inches = 'tight') plt.close('all') return epochs if fix: """ """ ar = AutoReject( # n_interpolate = n_interpolates, # consensus = consensus_pers, # thresh_method = 'bayesian_optimization', picks = picks, random_state = 12345, # n_jobs = 1, # verbose = 'progressbar', ) epochs_clean = ar.fit_transform(epochs_ica, ) return epochs_clean.pick_types(eeg=True,eog=False) else: clean_epochs = epochs_ica.pick_types(eeg = True, eog = False) picks = 
mne.pick_types(clean_epochs.info, eeg = True, eog = False,) # clean_epochs.filter(None, # low_pass, # picks = picks, # filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”) # method = 'fir', # overlap-add FIR filtering # phase = 'zero', # the delay of this filter is compensated for # fir_window = 'hamming', # The window to use in FIR design # fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2” # ) if logging is not None: for key in clean_epochs.event_id.keys(): evoked = epochs[key].average() fig_ = evoked.plot_joint(title = key) fig_.savefig(logging.replace('.png',f'_{key}_post.png'), bbox_inches = 'tight') plt.close('all') return clean_epochs def plot_temporal_decoding(times, scores, frames, ii, conscious_state, plscores, n_splits, ylim = (0.2,0.8)): scores_mean = scores.mean(0) scores_se = scores.std(0) / np.sqrt(n_splits) fig,ax = plt.subplots(figsize = (16,8)) ax.plot(times,scores_mean, color = 'k', alpha = .9, label = f'Average across {n_splits} folds', ) ax.fill_between(times, scores_mean + scores_se, scores_mean - scores_se, color = 'red', alpha = 0.4, label = 'Standard Error',) ax.axhline(0.5, linestyle = '--', color = 'k', alpha = 0.7, label = 'Chance level') ax.axvline(0, linestyle = '--', color = 'blue', alpha = 0.7, label = 'Probe onset',) if ii is not None: ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'blue', alpha = 0.3, label = 'probe offset ave +/- std',) ax.set(xlim = (times.min(), times.max()), ylim = ylim,#(0.4,0.6), title = f'Temporal decoding of {conscious_state} = {plscores.mean():.3f}+/-{plscores.std():.3f}', ) ax.legend() return fig,ax def plot_temporal_generalization(scores_gen_, times, ii, 
conscious_state, frames, vmin = 0.4, vmax = 0.6): fig, ax = plt.subplots(figsize = (10,10)) if len(scores_gen_.shape) > 2: scores_gen_ = scores_gen_.mean(0) im = ax.imshow( scores_gen_, interpolation = 'hamming', origin = 'lower', cmap = 'RdBu_r', extent = times[[0, -1, 0, -1]], vmin = vmin, vmax = vmax, ) ax.set_xlabel('Testing Time (s)') ax.set_ylabel('Training Time (s)') ax.set_title(f'Temporal generalization of {conscious_state}') ax.axhline(0., linestyle = '--', color = 'black', alpha = 0.7, label = 'Probe onset',) ax.axvline(0., linestyle = '--', color = 'black', alpha = 0.7, ) if ii is not None: ax.axhspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'black', alpha = 0.2, label = 'probe offset ave +/- std',) ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'black', alpha = 0.2, ) plt.colorbar(im, ax = ax) ax.legend() return fig,ax def plot_t_stats(T_obs, clusters, cluster_p_values, times, ii, conscious_state, frames,): # since the p values of each cluster is corrected for multiple comparison, # we could directly use 0.05 as the threshold to filter clusters T_obs_plot = 0 * np.ones_like(T_obs) k = np.array([np.sum(c) for c in clusters]) if np.max(k) > 1000: c_thresh = 1000 elif 1000 > np.max(k) > 500: c_thresh = 500 elif 500 > np.max(k) > 100: c_thresh = 100 elif 100 > np.max(k) > 10: c_thresh = 10 else: c_thresh = 0 for c, p_val in zip(clusters, cluster_p_values): if (p_val <= 0.01) and (np.sum(c) >= c_thresh):# and (distance.cdist(np.where(c == True)[0].reshape(1,-1),np.where(c == True)[1].reshape(1,-1))[0][0] < 200):# and (np.sum(c) >= c_thresh): T_obs_plot[c] = T_obs[c] # defind the range of the colorbar vmax = np.max(np.abs(T_obs)) vmin = -vmax# - 2 * t_threshold plt.close('all') fig,ax = plt.subplots(figsize=(10,10)) im = ax.imshow(T_obs_plot, origin = 'lower', cmap = plt.cm.RdBu_r,# to emphasize the 
clusters extent = times[[0, -1, 0, -1]], vmin = vmin, vmax = vmax, interpolation = 'lanczos', ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size = "5%", pad = 0.2) cb = plt.colorbar(im, cax = cax, ticks = np.linspace(vmin,vmax,3)) cb.ax.set(title = 'T Statistics') ax.plot([times[0],times[-1]],[times[0],times[-1]], linestyle = '--', color = 'black', alpha = 0.7, ) ax.axhline(0., linestyle = '--', color = 'black', alpha = 0.7, label = 'Probe onset',) ax.axvline(0., linestyle = '--', color = 'black', alpha = 0.7, ) if ii is not None: ax.axhspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'black', alpha = 0.2, label = 'probe offset ave +/- std',) ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'black', alpha = 0.2, ) ax.set(xlabel = 'Test time', ylabel = 'Train time', title = f'nonparametric t test of {conscious_state}') ax.legend() return fig,ax def plot_p_values(times, clusters, cluster_p_values, ii, conscious_state, frames): width = len(times) p_clust = np.ones((width, width))# * np.nan k = np.array([np.sum(c) for c in clusters]) if np.max(k) > 1000: c_thresh = 1000 elif 1000 > np.max(k) > 500: c_thresh = 500 elif 500 > np.max(k) > 100: c_thresh = 100 elif 100 > np.max(k) > 10: c_thresh = 10 else: c_thresh = 0 for c, p_val in zip(clusters, cluster_p_values): if (np.sum(c) >= c_thresh): p_val_ = p_val.copy() if p_val_ > 0.05: p_val_ = 1. p_clust[c] = p_val_ # defind the range of the colorbar vmax = 1. vmin = 0. 
plt.close('all') fig,ax = plt.subplots(figsize = (10,10)) im = ax.imshow(p_clust, origin = 'lower', cmap = plt.cm.RdBu_r,# to emphasize the clusters extent = times[[0, -1, 0, -1]], vmin = vmin, vmax = vmax, interpolation = 'hanning', ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size = "5%", pad = 0.2) cb = plt.colorbar(im, cax = cax, ticks = [0,0.05,1]) cb.ax.set(title = 'P values') ax.plot([times[0],times[-1]],[times[0],times[-1]], linestyle = '--', color = 'black', alpha = 0.7, ) ax.axhline(0., linestyle = '--', color = 'black', alpha = 0.7, label = 'Probe onset',) ax.axvline(0., linestyle = '--', color = 'black', alpha = 0.7, ) if ii is not None: ax.axhspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'black', alpha = 0.2, label = 'probe offset ave +/- std',) ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100), frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100), color = 'black', alpha = 0.2, ) ax.set(xlabel = 'Test time', ylabel = 'Train time', title = f'p value map of {conscious_state}') ax.legend() return fig,ax def plot_EEG_autoreject_log(autoreject_object,): ar = autoreject_object loss = ar.loss_['eeg'].mean(axis=-1) # losses are stored by channel type. 
fig,ax = plt.subplots(figsize=(10,6)) im = ax.matshow(loss.T * 1e6, cmap=plt.get_cmap('viridis')) ax.set(xticks = range(len(ar.consensus)), xticklabels = ar.consensus.round(2), yticks = range(len(ar.n_interpolate)), yticklabels = ar.n_interpolate) # Draw rectangle at location of best parameters idx, jdx = np.unravel_index(loss.argmin(), loss.shape) rect = patches.Rectangle((idx - 0.5, jdx - 0.5), 1, 1, linewidth=2, edgecolor='r', facecolor='none') ax.add_patch(rect) ax.xaxis.set_ticks_position('bottom') ax.set(xlabel = r'Consensus percentage $\kappa$', ylabel = r'Max sensors interpolated $\rho$', title = 'Mean cross validation error (x 1e6)') plt.colorbar(im) return fig def str2int(x): if type(x) is str: return float(re.findall(r'\d+',x)[0]) else: return x def simple_load(f,idx): df = pd.read_csv(f) df['run'] = idx return df def get_frames(directory,new = True,EEG = True): if EEG: files = glob(os.path.join(directory,'*trials.csv')) # elif EEG == 'fMRI': # files = glob(os.path.join(directory,'*trials.csv')) else: files = glob(os.path.join(directory,'*','*.csv')) empty_temp = '' for ii,f in enumerate(files): df = pd.read_csv(f).dropna() for vis,df_sub in df.groupby(['visible.keys_raw']): try: print(f'session {ii+1}, vis = {vis}, n_trials = {df_sub.shape[0]}') empty_temp += f'session {ii+1}, vis = {vis}, n_trials = {df_sub.shape[0]}' empty_temp += '\n' except: print('session {}, vis = {}, n_trials = {}'.format(ii+1, vis,df_sub.shape[0])) df = pd.concat([simple_load(f,ii).dropna() for ii,f in enumerate(files)]) try: for col in ['probeFrames_raw', 'response.keys_raw', 'visible.keys_raw']: # print(df[col]) df[col] = df[col].apply(str2int) except: for col in ['probe_Frames_raw', 'response.keys_raw', 'visible.keys_raw']: # print(df[col]) df[col] = df[col].apply(str2int) df["probeFrames_raw"] = df["probe_Frames_raw"] df = df[df['probeFrames_raw'] != 999] df = df.sort_values(['run','order']) for vis,df_sub in df.groupby(['visible.keys_raw']): df_press1 = 
df_sub[df_sub['response.keys_raw'] == 1] df_press2 = df_sub[df_sub['response.keys_raw'] == 2] prob1 = df_press1.shape[0] / df_sub.shape[0] prob2 = df_press2.shape[0] / df_sub.shape[0] try: print(f"\nvis = {vis},mean frames = {np.median(df_sub['probeFrames_raw']):.5f}") print(f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}") empty_temp += f"\nvis = {vis},mean frames = {np.median(df_sub['probeFrames_raw']):.5f}\n" empty_temp += f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}\n" except: print("\nvis = {},mean frames = {:.5f}".format( vis,np.median(df_sub['probeFrames_raw']))) print(f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}") if new: df = [] for f in files: temp = pd.read_csv(f).dropna() try: temp[['probeFrames_raw','visible.keys_raw']] except: temp['probeFrames_raw'] = temp['probe_Frames_raw'] probeFrame = [] for ii,row in temp.iterrows(): if int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 1: probeFrame.append(row['probeFrames_raw']) elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 2: probeFrame.append(row['probeFrames_raw']) elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 3: probeFrame.append(row['probeFrames_raw']) elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 4: probeFrame.append(row['probeFrames_raw']) temp['probeFrames'] = probeFrame df.append(temp) df = pd.concat(df) else: df = [] for f in files: temp = pd.read_csv(f).dropna() temp[['probeFrames_raw','visible.keys_raw']] probeFrame = [] for ii,row in temp.iterrows(): if int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 1: probeFrame.append(row['probeFrames_raw'] - 2) elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 2: probeFrame.append(row['probeFrames_raw'] - 1) elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 3: probeFrame.append(row['probeFrames_raw'] + 1) elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 4: probeFrame.append(row['probeFrames_raw'] + 2) temp['probeFrames'] = 
probeFrame df.append(temp) df = pd.concat(df) df['probeFrames'] = df['probeFrames'].apply(str2int) df = df[df['probeFrames'] != 999] results = [] for vis,df_sub in df.groupby(['visible.keys_raw']): corrects = df_sub['response.corr_raw'].sum() / df_sub.shape[0] try: print(f"vis = {vis},N = {df_sub.shape[0]},mean frames = {np.mean(df_sub['probeFrames']):.2f} +/- {np.std(df_sub['probeFrames']):.2f}\np(correct) = {corrects:.4f}") empty_temp += f"vis = {vis},N = {df_sub.shape[0]},mean frames = {np.mean(df_sub['probeFrames']):.2f} +/- {np.std(df_sub['probeFrames']):.2f}\np(correct) = {corrects:.4f}\n" empty_temp += f"RT = {np.mean(df_sub['visible.rt_raw']):.3f} +/- {np.std(df_sub['visible.rt_raw']):.3f}\n" except: print("vis = {},mean frames = {:.2f} +/- {:.2f}".format( vis,np.mean(df_sub['probeFrames']),np.std(df_sub['probeFrames']))) results.append([vis,np.mean(df_sub['probeFrames']),np.std(df_sub['probeFrames'])]) return results,empty_temp def preprocess_behavioral_file(f): df = read_behavorial_file(f) for col in ['probeFrames_raw', 'response.keys_raw', 'visible.keys_raw']: df[col] = df[col].apply(str2int) df = df.sort_values(['order']) return df def read_behavorial_file(f): temp = pd.read_csv(f).iloc[:-12,:] return temp def preload(f): temp = pd.read_csv(f).iloc[-12:,:2] return temp def extract(x): try: return int(re.findall(r'\d',x)[0]) except: return int(99) #def extract_session_run_from_MRI(x): # temp = re.findall(r'\d+',x) # session = temp[1] # if int(session) == 7: # session = '1' # run = temp[-1] # return session,run #def check_behaviral_data_session_block(x): # temp = preload(x) # temp.index = temp['category'] # temp = temp.T # session = int(temp['session'].values[-1]) # block = int(temp['block'].values[-1]) # return session,block #def compare_match(behavorial_file_name,session,block): # behav_session,behav_block = check_behaviral_data_session_block(behavorial_file_name) # if np.logical_and(behav_session == session, behav_block == block): # return True # else: 
# return False

def add_track(df_sub):
    """Collapse a per-trial group of rows into a single tracking row.

    Keeps the first row of ``df_sub`` and adds two bookkeeping columns:
    ``n_volume`` (how many rows/volumes the group had) and ``time_indices``
    (the group's original index values joined with '+').

    Parameters
    ----------
    df_sub : pandas.DataFrame
        One group produced by a ``DataFrame.groupby`` iteration.

    Returns
    -------
    pandas.DataFrame
        A single-row frame (the group's first row, transposed back to a row).
    """
    n_rows = df_sub.shape[0]
    if len(df_sub.index.values) > 1:
        # NOTE(review): the `+ 10` offset on the index values is unexplained
        # in the original code — presumably an acquisition-specific volume
        # offset; confirm before changing.
        temp = '+'.join(str(item + 10) for item in df_sub.index.values)
    else:
        temp = str(df_sub.index.values[0])
    df_sub = df_sub.iloc[0, :].to_frame().T  # why did I use 1 instead of 0?
    df_sub['n_volume'] = n_rows
    df_sub['time_indices'] = temp
    return df_sub

def groupby_average(fmri, df, groupby=['trials']):
    """Average BOLD volumes within each group of ``df``.

    Parameters
    ----------
    fmri : array-like
        Volume-indexed fMRI data; rows are selected via each group's index.
    df : pandas.DataFrame
        Event table whose index aligns with the first axis of ``fmri``.
    groupby : list of str
        Column name(s) to group on (default ``['trials']``).

    Returns
    -------
    (numpy.ndarray, pandas.DataFrame)
        Per-group averaged BOLD data and the matching tracking table.

    Note
    ----
    ``groupby=['trials']`` is a mutable default; it is never mutated here,
    so the shared-default pitfall does not bite, but callers should still
    pass their own list.
    """
    BOLD_average = np.array([np.mean(fmri[df_sub.index], 0)
                             for _, df_sub in df.groupby(groupby)])
    df_average = pd.concat([add_track(df_sub)
                            for ii, df_sub in df.groupby(groupby)])
    return BOLD_average, df_average

def get_brightness_threshold(thresh):
    """Return 75% of each value — SUSAN brightness threshold from medians."""
    return [0.75 * val for val in thresh]

def get_brightness_threshold_double(thresh):
    """Return 150% (2 * 0.75) of each value, used for the SUSAN usans input."""
    return [2 * 0.75 * val for val in thresh]

def cartesian_product(fwhms, in_files, usans, btthresh):
    """Expand inputs into the per-(file, fwhm) lists a SUSAN MapNode expects.

    Mirrors nipype's susan_smooth helper: every input file is paired with
    every smoothing kernel.
    """
    from nipype.utils.filemanip import ensure_list
    # ensure all inputs are lists
    in_files = ensure_list(in_files)
    fwhms = [fwhms] if isinstance(fwhms, (int, float)) else fwhms
    # create cartesian product lists (s_<name> = single element of list)
    cart_in_file = [s_in_file for s_in_file in in_files for s_fwhm in fwhms]
    cart_fwhm = [s_fwhm for s_in_file in in_files for s_fwhm in fwhms]
    cart_usans = [s_usans for s_usans in usans for s_fwhm in fwhms]
    cart_btthresh = [s_btthresh for s_btthresh in btthresh for s_fwhm in fwhms]
    return cart_in_file, cart_fwhm, cart_usans, cart_btthresh

def getusans(x):
    """Convert (mean, median) pairs into SUSAN ``usans`` tuples: (mean, 0.5*median)."""
    return [[tuple([val[0], 0.5 * val[1]])] for val in x]

def create_fsl_FEAT_workflow_func(whichrun=0,
                                  whichvol='middle',
                                  workflow_name='nipype_mimic_FEAT',
                                  first_run=True,
                                  func_data_file='temp',
                                  fwhm=3):
    """Build a nipype workflow reproducing FSL FEAT functional preprocessing.

    Works with fsl-5.0.9 and fsl-5.0.11, but not fsl-6.0.0.

    Parameters
    ----------
    whichrun : int
        Which run to pick when extracting the mean functional.
    whichvol : str
        Which volume to use as the motion-correction reference ('middle', ...).
    workflow_name : str
        Name of the generated workflow.
    first_run : bool or str
        ``True`` for the first run (a reference volume is extracted here);
        otherwise the *path* to an existing reference volume. The explicit
        ``== True`` comparisons below are intentional — a path string is
        truthy but must take the ``else`` branch.
    func_data_file : str
        Path to the 4D functional image; also determines the working dir.
    fwhm : int or float
        Smoothing kernel in mm (default 3).

    Returns
    -------
    (preproc, MC_dir, output_dir)
        The configured (unrun) workflow and its output directories.
    """
    from nipype.workflows.fmri.fsl import preprocess
    from nipype.interfaces import fsl
    from nipype.interfaces import utility as util
    from nipype.pipeline import engine as pe
    """
    Setup some functions and hyperparameters
    """
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    pickrun = preprocess.pickrun
    pickvol = preprocess.pickvol
    getthreshop = preprocess.getthreshop
    getmeanscale = preprocess.getmeanscale
    # chooseindex = preprocess.chooseindex
    """
    Start constructing the workflow graph
    """
    preproc = pe.Workflow(name=workflow_name)
    """
    Initialize the input and output spaces
    """
    inputnode = pe.Node(
        interface=util.IdentityInterface(fields=['func', 'fwhm', 'anat']),
        name='inputspec')
    outputnode = pe.Node(
        interface=util.IdentityInterface(fields=['reference',
                                                 'motion_parameters',
                                                 'realigned_files',
                                                 'motion_plots',
                                                 'mask',
                                                 'smoothed_files',
                                                 'mean']),
        name='outputspec')
    """
    first step: convert Images to float values
    """
    img2float = pe.MapNode(
        interface=fsl.ImageMaths(out_data_type='float',
                                 op_string='',
                                 suffix='_dtype'),
        iterfield=['in_file'],
        name='img2float')
    preproc.connect(inputnode, 'func', img2float, 'in_file')
    """
    delete first 10 volumes
    """
    # NOTE(review): t_size=508 hard-codes the expected number of remaining
    # volumes for this acquisition — confirm against the scan protocol.
    develVolume = pe.MapNode(
        interface=fsl.ExtractROI(t_min=10, t_size=508),
        iterfield=['in_file'],
        name='remove_volumes')
    preproc.connect(img2float, 'out_file', develVolume, 'in_file')
    if first_run == True:
        """
        extract example fMRI volume: middle one
        """
        extract_ref = pe.MapNode(
            interface=fsl.ExtractROI(t_size=1,),
            iterfield=['in_file'],
            name='extractref')
        # connect to the deleteVolume node to get the data
        preproc.connect(develVolume, 'roi_file', extract_ref, 'in_file')
        # connect to the deleteVolume node again to perform the extraction
        preproc.connect(develVolume, ('roi_file', pickvol, 0, whichvol),
                        extract_ref, 't_min')
        # connect to the output node to save the reference volume
        preproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
    if first_run == True:
        """
        Realign the functional runs to the reference (`whichvol` volume of first run)
        """
        motion_correct = pe.MapNode(
            interface=fsl.MCFLIRT(save_mats=True,
                                  save_plots=True,
                                  save_rms=True,
                                  stats_imgs=True,
                                  interpolation='spline'),
            iterfield=['in_file', 'ref_file'],
            name='MCFlirt',
        )
        # connect to the develVolume node to get the input data
        preproc.connect(develVolume, 'roi_file', motion_correct, 'in_file',)
        ######################################################################
        ######## the part where we replace the actual reference image ########
        ######################################################################
        # connect to the develVolume node to get the reference
        preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
        ######################################################################
        # connect to the output node to save the motion correction parameters
        preproc.connect(motion_correct, 'par_file',
                        outputnode, 'motion_parameters')
        # connect to the output node to save the other files
        preproc.connect(motion_correct, 'out_file',
                        outputnode, 'realigned_files')
    else:
        """
        Realign the functional runs to the reference (`whichvol` volume of first run)
        """
        # here `first_run` is the path of the reference volume from run 1
        motion_correct = pe.MapNode(
            interface=fsl.MCFLIRT(ref_file=first_run,
                                  save_mats=True,
                                  save_plots=True,
                                  save_rms=True,
                                  stats_imgs=True,
                                  interpolation='spline'),
            iterfield=['in_file', 'ref_file'],
            name='MCFlirt',
        )
        # connect to the develVolume node to get the input data
        preproc.connect(develVolume, 'roi_file', motion_correct, 'in_file',)
        # connect to the output node to save the motion correction parameters
        preproc.connect(motion_correct, 'par_file',
                        outputnode, 'motion_parameters')
        # connect to the output node to save the other files
        preproc.connect(motion_correct, 'out_file',
                        outputnode, 'realigned_files')
    """
    plot the estimated motion parameters
    """
    plot_motion = pe.MapNode(
        interface=fsl.PlotMotionParams(in_source='fsl'),
        iterfield=['in_file'],
        name='plot_motion',
    )
    plot_motion.iterables = ('plot_type', ['rotations',
                                           'translations',
                                           'displacement'])
    preproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
    preproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
    """
    extract the mean volume of the first functional run
    """
    meanfunc = pe.Node(
        interface=fsl.ImageMaths(op_string='-Tmean',
                                 suffix='_mean',),
        name='meanfunc')
    preproc.connect(motion_correct, ('out_file', pickrun, whichrun),
                    meanfunc, 'in_file')
    """
    strip the skull from the mean functional to generate a mask
    """
    meanfuncmask = pe.Node(
        interface=fsl.BET(mask=True,
                          no_output=True,
                          frac=0.3,
                          surfaces=True,),
        name='bet2_mean_func')
    preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')
    """
    Mask the motion corrected functional data with the mask to create
    the masked (bet) motion corrected functional data
    """
    maskfunc = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_bet',
                                 op_string='-mas'),
        iterfield=['in_file'],
        name='maskfunc')
    preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
    preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')
    """
    determine the 2nd and 98th percentiles of each functional run
    """
    getthreshold = pe.MapNode(
        interface=fsl.ImageStats(op_string='-p 2 -p 98'),
        iterfield=['in_file'],
        name='getthreshold')
    preproc.connect(maskfunc, 'out_file', getthreshold, 'in_file')
    """
    threshold the functional data at 10% of the 98th percentile
    """
    threshold = pe.MapNode(
        interface=fsl.ImageMaths(out_data_type='char',
                                 suffix='_thresh',
                                 op_string='-Tmin -bin'),
        iterfield=['in_file', 'op_string'],
        name='tresholding')
    preproc.connect(maskfunc, 'out_file', threshold, 'in_file')
    """
    define a function to get 10% of the intensity
    """
    # getthreshop overrides the static '-Tmin -bin' op_string above
    preproc.connect(getthreshold, ('out_stat', getthreshop),
                    threshold, 'op_string')
    """
    Determine the median value of the functional runs using the mask
    """
    medianval = pe.MapNode(
        interface=fsl.ImageStats(op_string='-k %s -p 50'),
        iterfield=['in_file', 'mask_file'],
        name='cal_intensity_scale_factor')
    preproc.connect(motion_correct, 'out_file', medianval, 'in_file')
    preproc.connect(threshold, 'out_file', medianval, 'mask_file')
    """
    dilate the mask
    """
    dilatemask = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_dil',
                                 op_string='-dilF'),
        iterfield=['in_file'],
        name='dilatemask')
    preproc.connect(threshold, 'out_file', dilatemask, 'in_file')
    preproc.connect(dilatemask, 'out_file', outputnode, 'mask')
    """
    mask the motion corrected functional runs with the dilated mask
    """
    dilateMask_MCed = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_mask',
                                 op_string='-mas'),
        iterfield=['in_file', 'in_file2'],
        name='dilateMask_MCed')
    preproc.connect(motion_correct, 'out_file', dilateMask_MCed, 'in_file',)
    preproc.connect(dilatemask, 'out_file', dilateMask_MCed, 'in_file2')
    """
    We now take this functional data that is motion corrected, high pass
    filtered, and create a "mean_func" image that is the mean across time
    (Tmean)
    """
    meanfunc2 = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_mean',
                                 op_string='-Tmean',),
        iterfield=['in_file'],
        name='meanfunc2')
    preproc.connect(dilateMask_MCed, 'out_file', meanfunc2, 'in_file')
    """
    smooth each run using SUSAN with the brightness threshold set to 75%
    of the median value for each run and a mask constituing the mean
    functional
    """
    merge = pe.Node(
        interface=util.Merge(2, axis='hstack'),
        name='merge')
    preproc.connect(meanfunc2, 'out_file', merge, 'in1')
    preproc.connect(medianval, ('out_stat', get_brightness_threshold_double),
                    merge, 'in2')
    smooth = pe.MapNode(
        interface=fsl.SUSAN(dimension=3,
                            use_median=True),
        iterfield=['in_file', 'brightness_threshold', 'fwhm', 'usans'],
        name='susan_smooth')
    preproc.connect(dilateMask_MCed, 'out_file', smooth, 'in_file')
    preproc.connect(medianval, ('out_stat', get_brightness_threshold),
                    smooth, 'brightness_threshold')
    preproc.connect(inputnode, 'fwhm', smooth, 'fwhm')
    preproc.connect(merge, ('out', getusans), smooth, 'usans')
    """
    mask the smoothed data with the dilated mask
    """
    maskfunc3 = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_mask',
                                 op_string='-mas'),
        iterfield=['in_file', 'in_file2'],
        name='dilateMask_smoothed')
    # connect the output of the susan smooth component to the maskfunc3 node
    preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file')
    # connect the output of the dilated mask to the maskfunc3 node
    preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
    """
    scale the median value of the run is set to 10000
    """
    meanscale = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_intnorm'),
        iterfield=['in_file', 'op_string'],
        name='meanscale')
    preproc.connect(maskfunc3, 'out_file', meanscale, 'in_file')
    preproc.connect(meanscale, 'out_file', outputnode, 'smoothed_files')
    """
    define a function to get the scaling factor for intensity normalization
    """
    preproc.connect(medianval, ('out_stat', getmeanscale),
                    meanscale, 'op_string')
    """
    generate a mean functional image from the first run
    should this be the 'mean.nii.gz' we will use in the future?
    """
    meanfunc3 = pe.MapNode(
        interface=fsl.ImageMaths(suffix='_mean',
                                 op_string='-Tmean',),
        iterfield=['in_file'],
        name='gen_mean_func_img')
    preproc.connect(meanscale, 'out_file', meanfunc3, 'in_file')
    preproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
    # initialize some of the input files
    preproc.inputs.inputspec.func = os.path.abspath(func_data_file)
    # FIX: honor the caller-supplied fwhm (was hard-coded to 3, silently
    # ignoring the function's `fwhm` parameter; default is unchanged)
    preproc.inputs.inputspec.fwhm = fwhm
    preproc.base_dir = os.path.abspath('/'.join(
        func_data_file.split('/')[:-1]))
    output_dir = os.path.abspath(os.path.join(
        preproc.base_dir, 'outputs', 'func'))
    MC_dir = os.path.join(output_dir, 'MC')
    for directories in [output_dir, MC_dir]:
        if not os.path.exists(directories):
            os.makedirs(directories)
    # initialize all the output files
    # NOTE(review): only the extractref assignment is gated on first_run,
    # because the `extractref` node exists only in that branch; the other
    # output names apply to both branches — confirm against the original
    # (un-mangled) indentation.
    if first_run == True:
        preproc.inputs.extractref.roi_file = os.path.abspath(os.path.join(
            output_dir, 'example_func.nii.gz'))
    preproc.inputs.dilatemask.out_file = os.path.abspath(os.path.join(
        output_dir, 'mask.nii.gz'))
    preproc.inputs.meanscale.out_file = os.path.abspath(os.path.join(
        output_dir, 'prefiltered_func.nii.gz'))
    preproc.inputs.gen_mean_func_img.out_file = os.path.abspath(os.path.join(
        output_dir, 'mean_func.nii.gz'))
    return preproc, MC_dir, output_dir
def create_registration_workflow( anat_brain, anat_head, example_func, standard_brain, standard_head, standard_mask, workflow_name = 'registration', output_dir = 'temp'): from nipype.interfaces import fsl from nipype.interfaces import utility as util from nipype.pipeline import engine as pe fsl.FSLCommand.set_default_output_type('NIFTI_GZ') registration = pe.Workflow(name = 'registration') inputnode = pe.Node( interface = util.IdentityInterface( fields = [ 'highres', # anat_brain 'highres_head', # anat_head 'example_func', 'standard', # standard_brain 'standard_head', 'standard_mask' ]), name = 'inputspec') outputnode = pe.Node( interface = util.IdentityInterface( fields = ['example_func2highres_nii_gz', 'example_func2highres_mat', 'linear_example_func2highres_log', 'highres2example_func_mat', 'highres2standard_linear_nii_gz', 'highres2standard_mat', 'linear_highres2standard_log', 'highres2standard_nii_gz', 'highres2standard_warp_nii_gz', 'highres2standard_head_nii_gz', # 'highres2standard_apply_warp_nii_gz', 'highres2highres_jac_nii_gz', 'nonlinear_highres2standard_log', 'highres2standard_nii_gz', 'standard2highres_mat', 'example_func2standard_mat', 'example_func2standard_warp_nii_gz', 'example_func2standard_nii_gz', 'standard2example_func_mat', ]), name = 'outputspec') """ fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres_head fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain standard fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm standard_head fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain_mask_dil standard_mask """ # skip """ /opt/fsl/fsl-5.0.10/fsl/bin/flirt -in example_func -ref highres -out example_func2highres -omat example_func2highres.mat -cost corratio -dof 7 -searchrx -180 180 -searchry -180 180 
-searchrz -180 180 -interp trilinear """ linear_example_func2highres = pe.MapNode( interface = fsl.FLIRT(cost = 'corratio', interp = 'trilinear', dof = 7, save_log = True, searchr_x = [-180, 180], searchr_y = [-180, 180], searchr_z = [-180, 180],), iterfield = ['in_file','reference'], name = 'linear_example_func2highres') registration.connect(inputnode, 'example_func', linear_example_func2highres, 'in_file') registration.connect(inputnode, 'highres', linear_example_func2highres, 'reference') registration.connect(linear_example_func2highres, 'out_file', outputnode, 'example_func2highres_nii_gz') registration.connect(linear_example_func2highres, 'out_matrix_file', outputnode, 'example_func2highres_mat') registration.connect(linear_example_func2highres, 'out_log', outputnode, 'linear_example_func2highres_log') """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -inverse -omat highres2example_func.mat example_func2highres.mat """ get_highres2example_func = pe.MapNode( interface = fsl.ConvertXFM(invert_xfm = True), iterfield = ['in_file'], name = 'get_highres2example_func') registration.connect(linear_example_func2highres,'out_matrix_file', get_highres2example_func,'in_file') registration.connect(get_highres2example_func,'out_file', outputnode,'highres2example_func_mat') """ /opt/fsl/fsl-5.0.10/fsl/bin/flirt -in highres -ref standard -out highres2standard -omat highres2standard.mat -cost corratio -dof 12 -searchrx -180 180 -searchry -180 180 -searchrz -180 180 -interp trilinear """ linear_highres2standard = pe.MapNode( interface = fsl.FLIRT(cost = 'corratio', interp = 'trilinear', dof = 12, save_log = True, searchr_x = [-180, 180], searchr_y = [-180, 180], searchr_z = [-180, 180],), iterfield = ['in_file','reference'], name = 'linear_highres2standard') registration.connect(inputnode,'highres', linear_highres2standard,'in_file') registration.connect(inputnode,'standard', linear_highres2standard,'reference',) registration.connect(linear_highres2standard,'out_file', 
outputnode,'highres2standard_linear_nii_gz') registration.connect(linear_highres2standard,'out_matrix_file', outputnode,'highres2standard_mat') registration.connect(linear_highres2standard,'out_log', outputnode,'linear_highres2standard_log') """ /opt/fsl/fsl-5.0.10/fsl/bin/fnirt --iout=highres2standard_head --in=highres_head --aff=highres2standard.mat --cout=highres2standard_warp --iout=highres2standard --jout=highres2highres_jac --config=T1_2_MNI152_2mm --ref=standard_head --refmask=standard_mask --warpres=10,10,10 """ nonlinear_highres2standard = pe.MapNode( interface = fsl.FNIRT(warp_resolution = (10,10,10), config_file = "T1_2_MNI152_2mm"), iterfield = ['in_file','ref_file','affine_file','refmask_file'], name = 'nonlinear_highres2standard') # -- iout registration.connect(nonlinear_highres2standard,'warped_file', outputnode,'highres2standard_head_nii_gz') # --in registration.connect(inputnode,'highres', nonlinear_highres2standard,'in_file') # --aff registration.connect(linear_highres2standard,'out_matrix_file', nonlinear_highres2standard,'affine_file') # --cout registration.connect(nonlinear_highres2standard,'fieldcoeff_file', outputnode,'highres2standard_warp_nii_gz') # --jout registration.connect(nonlinear_highres2standard,'jacobian_file', outputnode,'highres2highres_jac_nii_gz') # --ref registration.connect(inputnode,'standard_head', nonlinear_highres2standard,'ref_file',) # --refmask registration.connect(inputnode,'standard_mask', nonlinear_highres2standard,'refmask_file') # log registration.connect(nonlinear_highres2standard,'log_file', outputnode,'nonlinear_highres2standard_log') """ /opt/fsl/fsl-5.0.10/fsl/bin/applywarp -i highres -r standard -o highres2standard -w highres2standard_warp """ warp_highres2standard = pe.MapNode( interface = fsl.ApplyWarp(), iterfield = ['in_file','ref_file','field_file'], name = 'warp_highres2standard') registration.connect(inputnode,'highres', warp_highres2standard,'in_file') registration.connect(inputnode,'standard', 
warp_highres2standard,'ref_file') registration.connect(warp_highres2standard,'out_file', outputnode,'highres2standard_nii_gz') registration.connect(nonlinear_highres2standard,'fieldcoeff_file', warp_highres2standard,'field_file') """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -inverse -omat standard2highres.mat highres2standard.mat """ get_standard2highres = pe.MapNode( interface = fsl.ConvertXFM(invert_xfm = True), iterfield = ['in_file'], name = 'get_standard2highres') registration.connect(linear_highres2standard,'out_matrix_file', get_standard2highres,'in_file') registration.connect(get_standard2highres,'out_file', outputnode,'standard2highres_mat') """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -omat example_func2standard.mat -concat highres2standard.mat example_func2highres.mat """ get_exmaple_func2standard = pe.MapNode( interface = fsl.ConvertXFM(concat_xfm = True), iterfield = ['in_file','in_file2'], name = 'get_exmaple_func2standard') registration.connect(linear_example_func2highres, 'out_matrix_file', get_exmaple_func2standard,'in_file') registration.connect(linear_highres2standard,'out_matrix_file', get_exmaple_func2standard,'in_file2') registration.connect(get_exmaple_func2standard,'out_file', outputnode,'example_func2standard_mat') """ /opt/fsl/fsl-5.0.10/fsl/bin/convertwarp --ref=standard --premat=example_func2highres.mat --warp1=highres2standard_warp --out=example_func2standard_warp """ convertwarp_example2standard = pe.MapNode( interface = fsl.ConvertWarp(), iterfield = ['reference','premat','warp1'], name = 'convertwarp_example2standard') registration.connect(inputnode,'standard', convertwarp_example2standard,'reference') registration.connect(linear_example_func2highres,'out_matrix_file', convertwarp_example2standard,'premat') registration.connect(nonlinear_highres2standard,'fieldcoeff_file', convertwarp_example2standard,'warp1') registration.connect(convertwarp_example2standard,'out_file', outputnode,'example_func2standard_warp_nii_gz') """ 
/opt/fsl/fsl-5.0.10/fsl/bin/applywarp --ref=standard --in=example_func --out=example_func2standard --warp=example_func2standard_warp """ warp_example2stand = pe.MapNode( interface = fsl.ApplyWarp(), iterfield = ['ref_file','in_file','field_file'], name = 'warp_example2stand') registration.connect(inputnode,'standard', warp_example2stand,'ref_file') registration.connect(inputnode,'example_func', warp_example2stand,'in_file') registration.connect(warp_example2stand,'out_file', outputnode,'example_func2standard_nii_gz') registration.connect(convertwarp_example2standard,'out_file', warp_example2stand,'field_file') """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -inverse -omat standard2example_func.mat example_func2standard.mat """ get_standard2example_func = pe.MapNode( interface = fsl.ConvertXFM(invert_xfm = True), iterfield = ['in_file'], name = 'get_standard2example_func') registration.connect(get_exmaple_func2standard,'out_file', get_standard2example_func,'in_file') registration.connect(get_standard2example_func,'out_file', outputnode,'standard2example_func_mat') registration.base_dir = output_dir registration.inputs.inputspec.highres = anat_brain registration.inputs.inputspec.highres_head= anat_head registration.inputs.inputspec.example_func = example_func registration.inputs.inputspec.standard = standard_brain registration.inputs.inputspec.standard_head = standard_head registration.inputs.inputspec.standard_mask = standard_mask # define all the oupput file names with the directory registration.inputs.linear_example_func2highres.out_file = os.path.abspath(os.path.join(output_dir, 'example_func2highres.nii.gz')) registration.inputs.linear_example_func2highres.out_matrix_file = os.path.abspath(os.path.join(output_dir, 'example_func2highres.mat')) registration.inputs.linear_example_func2highres.out_log = os.path.abspath(os.path.join(output_dir, 'linear_example_func2highres.log')) registration.inputs.get_highres2example_func.out_file = 
os.path.abspath(os.path.join(output_dir, 'highres2example_func.mat')) registration.inputs.linear_highres2standard.out_file = os.path.abspath(os.path.join(output_dir, 'highres2standard_linear.nii.gz')) registration.inputs.linear_highres2standard.out_matrix_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.mat')) registration.inputs.linear_highres2standard.out_log = os.path.abspath(os.path.join(output_dir, 'linear_highres2standard.log')) # --iout registration.inputs.nonlinear_highres2standard.warped_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.nii.gz')) # --cout registration.inputs.nonlinear_highres2standard.fieldcoeff_file = os.path.abspath(os.path.join(output_dir, 'highres2standard_warp.nii.gz')) # --jout registration.inputs.nonlinear_highres2standard.jacobian_file = os.path.abspath(os.path.join(output_dir, 'highres2highres_jac.nii.gz')) registration.inputs.nonlinear_highres2standard.log_file = os.path.abspath(os.path.join(output_dir, 'nonlinear_highres2standard.log')) registration.inputs.warp_highres2standard.out_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.nii.gz')) registration.inputs.get_standard2highres.out_file = os.path.abspath(os.path.join(output_dir, 'standard2highres.mat')) registration.inputs.get_exmaple_func2standard.out_file = os.path.abspath(os.path.join(output_dir, 'example_func2standard.mat')) registration.inputs.convertwarp_example2standard.out_file = os.path.abspath(os.path.join(output_dir, 'example_func2standard_warp.nii.gz')) registration.inputs.warp_example2stand.out_file = os.path.abspath(os.path.join(output_dir, 'example_func2standard.nii.gz')) registration.inputs.get_standard2example_func.out_file = os.path.abspath(os.path.join(output_dir, 'standard2example_func.mat')) return registration def _create_registration_workflow(anat_brain, anat_head, func_ref, standard_brain, standard_head, standard_mask, output_dir = 'temp'): from nipype.interfaces import fsl """ fslmaths 
/bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres_head fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain standard fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm standard_head fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain_mask_dil standard_mask """ fslmaths = fsl.ImageMaths() fslmaths.inputs.in_file = anat_brain fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres.nii.gz')) fslmaths.cmdline fslmaths.run() fslmaths = fsl.ImageMaths() fslmaths.inputs.in_file = anat_head fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres_head.nii.gz')) fslmaths.cmdline fslmaths.run() fslmaths = fsl.ImageMaths() fslmaths.inputs.in_file = standard_brain fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard.nii.gz')) fslmaths.cmdline fslmaths.run() fslmaths = fsl.ImageMaths() fslmaths.inputs.in_file = standard_head fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard_head.nii.gz')) fslmaths.cmdline fslmaths.run() fslmaths = fsl.ImageMaths() fslmaths.inputs.in_file = standard_mask fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard_mask.nii.gz')) fslmaths.cmdline fslmaths.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/flirt -in example_func -ref highres -out example_func2highres -omat example_func2highres.mat -cost corratio -dof 7 -searchrx -180 180 -searchry -180 180 -searchrz -180 180 -interp trilinear """ flt = fsl.FLIRT() flt.inputs.in_file = func_ref flt.inputs.reference = anat_brain flt.inputs.out_file = os.path.abspath(os.path.join(output_dir,'example_func2highres.nii.gz')) flt.inputs.out_matrix_file = os.path.abspath(os.path.join(output_dir,'example_func2highres.mat')) flt.inputs.out_log = 
os.path.abspath(os.path.join(output_dir,'example_func2highres.log')) flt.inputs.cost = 'corratio' flt.inputs.interp = 'trilinear' flt.inputs.searchr_x = [-180, 180] flt.inputs.searchr_y = [-180, 180] flt.inputs.searchr_z = [-180, 180] flt.inputs.dof = 7 flt.inputs.save_log = True flt.cmdline flt.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -inverse -omat highres2example_func.mat example_func2highres.mat """ inverse_transformer = fsl.ConvertXFM() inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,"example_func2highres.mat")) inverse_transformer.inputs.invert_xfm = True inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres2example_func.mat')) inverse_transformer.cmdline inverse_transformer.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/flirt -in highres -ref standard -out highres2standard -omat highres2standard.mat -cost corratio -dof 12 -searchrx -180 180 -searchry -180 180 -searchrz -180 180 -interp trilinear """ flt = fsl.FLIRT() flt.inputs.in_file = anat_brain flt.inputs.reference = standard_brain flt.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres2standard_linear.nii.gz')) flt.inputs.out_matrix_file = os.path.abspath(os.path.join(output_dir,'highres2standard.mat')) flt.inputs.out_log = os.path.abspath(os.path.join(output_dir,'highres2standard.log')) flt.inputs.cost = 'corratio' flt.inputs.interp = 'trilinear' flt.inputs.searchr_x = [-180, 180] flt.inputs.searchr_y = [-180, 180] flt.inputs.searchr_z = [-180, 180] flt.inputs.dof = 12 flt.inputs.save_log = True flt.cmdline flt.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/fnirt --iout=highres2standard_head --in=highres_head --aff=highres2standard.mat --cout=highres2standard_warp --iout=highres2standard --jout=highres2highres_jac --config=T1_2_MNI152_2mm --ref=standard_head --refmask=standard_mask --warpres=10,10,10 """ fnirt_mprage = fsl.FNIRT() fnirt_mprage.inputs.warp_resolution = (10, 10, 10) # --iout name of output image 
fnirt_mprage.inputs.warped_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.nii.gz')) # --in input image fnirt_mprage.inputs.in_file = anat_head # --aff affine transform fnirt_mprage.inputs.affine_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.mat')) # --cout output file with field coefficients fnirt_mprage.inputs.fieldcoeff_file = os.path.abspath(os.path.join(output_dir, 'highres2standard_warp.nii.gz')) # --jout fnirt_mprage.inputs.jacobian_file = os.path.abspath(os.path.join(output_dir, 'highres2highres_jac.nii.gz')) # --config fnirt_mprage.inputs.config_file = 'T1_2_MNI152_2mm' # --ref fnirt_mprage.inputs.ref_file = os.path.abspath(standard_head) # --refmask fnirt_mprage.inputs.refmask_file = os.path.abspath(standard_mask) # --warpres fnirt_mprage.inputs.log_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.log')) fnirt_mprage.cmdline fnirt_mprage.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/applywarp -i highres -r standard -o highres2standard -w highres2standard_warp """ aw = fsl.ApplyWarp() aw.inputs.in_file = anat_brain aw.inputs.ref_file = os.path.abspath(standard_brain) aw.inputs.out_file = os.path.abspath(os.path.join(output_dir, 'highres2standard.nii.gz')) aw.inputs.field_file = os.path.abspath(os.path.join(output_dir, 'highres2standard_warp.nii.gz')) aw.cmdline aw.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -inverse -omat standard2highres.mat highres2standard.mat """ inverse_transformer = fsl.ConvertXFM() inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,"highres2standard.mat")) inverse_transformer.inputs.invert_xfm = True inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard2highres.mat')) inverse_transformer.cmdline inverse_transformer.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -omat example_func2standard.mat -concat highres2standard.mat example_func2highres.mat """ inverse_transformer = fsl.ConvertXFM() 
inverse_transformer.inputs.in_file2 = os.path.abspath(os.path.join(output_dir,"highres2standard.mat")) inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir, "example_func2highres.mat")) inverse_transformer.inputs.concat_xfm = True inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,'example_func2standard.mat')) inverse_transformer.cmdline inverse_transformer.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/convertwarp --ref=standard --premat=example_func2highres.mat --warp1=highres2standard_warp --out=example_func2standard_warp """ warputils = fsl.ConvertWarp() warputils.inputs.reference = os.path.abspath(standard_brain) warputils.inputs.premat = os.path.abspath(os.path.join(output_dir, "example_func2highres.mat")) warputils.inputs.warp1 = os.path.abspath(os.path.join(output_dir, "highres2standard_warp.nii.gz")) warputils.inputs.out_file = os.path.abspath(os.path.join(output_dir, "example_func2standard_warp.nii.gz")) warputils.cmdline warputils.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/applywarp --ref=standard --in=example_func --out=example_func2standard --warp=example_func2standard_warp """ aw = fsl.ApplyWarp() aw.inputs.ref_file = os.path.abspath(standard_brain) aw.inputs.in_file = os.path.abspath(func_ref) aw.inputs.out_file = os.path.abspath(os.path.join(output_dir, "example_func2standard.nii.gz")) aw.inputs.field_file = os.path.abspath(os.path.join(output_dir, "example_func2standard_warp.nii.gz")) aw.run() """ /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm -inverse -omat standard2example_func.mat example_func2standard.mat """ inverse_transformer = fsl.ConvertXFM() inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir, "example_func2standard.mat")) inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir, "standard2example_func.mat")) inverse_transformer.inputs.invert_xfm = True inverse_transformer.cmdline inverse_transformer.run() ###################### ###### plotting ###### 
example_func2highres = os.path.abspath(os.path.join(output_dir, 'example_func2highres')) example_func2standard = os.path.abspath(os.path.join(output_dir, "example_func2standard")) highres2standard = os.path.abspath(os.path.join(output_dir, 'highres2standard')) highres = os.path.abspath(anat_brain) standard = os.path.abspath(standard_brain) plot_example_func2highres = f""" /opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2highres} {highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}1.png ; /opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres} {example_func2highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}2.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2highres}1.png - {example_func2highres}2.png {example_func2highres}.png; /bin/rm -f sl?.png {example_func2highres}2.png /bin/rm {example_func2highres}1.png """.replace("\n"," ") plot_highres2standard = f""" /opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}1.png ; 
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {highres2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}2.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend {highres2standard}1.png - {highres2standard}2.png {highres2standard}.png; /bin/rm -f sl?.png {highres2standard}2.png /bin/rm {highres2standard}1.png """.replace("\n"," ") plot_example_func2standard = f""" /opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}1.png ; /opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {example_func2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}2.png ; /opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2standard}1.png - {example_func2standard}2.png {example_func2standard}.png; /bin/rm -f sl?.png {example_func2standard}2.png """.replace("\n"," ") for cmdline in [plot_example_func2highres,plot_example_func2standard,plot_highres2standard]: os.system(cmdline) def create_simple_struc2BOLD(roi, roi_name, preprocessed_functional_dir, output_dir): 
    # Body of create_simple_struc2BOLD (def on the previous line):
    # build a small nipype workflow that FLIRT-transforms an anatomical-space
    # ROI into the subject's functional (BOLD) space, thresholds/binarizes it,
    # and clips it to the functional brain mask.
    from nipype.interfaces import fsl
    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as util
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    simple_workflow = pe.Workflow(name = 'struc2BOLD')
    # workflow I/O endpoints
    inputnode = pe.Node(interface = util.IdentityInterface(
            fields = ['flt_in_file',
                      'flt_in_matrix',
                      'flt_reference',
                      'mask']),
            name = 'inputspec')
    # NOTE(review): 'BODL_mask' looks like a typo for 'BOLD_mask' but is kept
    # because downstream code may reference this field name.
    outputnode = pe.Node(interface = util.IdentityInterface(
            fields = ['BODL_mask']),
            name = 'outputspec')
    """
    flirt
    -in /export/home/dsoto/dsoto/fmri/$s/sess2/label/$i
    -ref /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/example_func.nii.gz
    -applyxfm
    -init /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/reg/highres2example_func.mat
    -out /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
    """
    # apply the precomputed highres->example_func affine to move the ROI
    # into BOLD space (equivalent of the flirt command quoted above)
    flirt_convert = pe.MapNode(
            interface = fsl.FLIRT(apply_xfm = True),
            iterfield = ['in_file',
                         'reference',
                         'in_matrix_file'],
            name = 'flirt_convert')
    simple_workflow.connect(inputnode, 'flt_in_file',
                            flirt_convert, 'in_file')
    simple_workflow.connect(inputnode, 'flt_reference',
                            flirt_convert, 'reference')
    simple_workflow.connect(inputnode, 'flt_in_matrix',
                            flirt_convert, 'in_matrix_file')
    """
    fslmaths /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
    -mul 2
    -thr `fslstats /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -p 99.6`
    -bin /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
    """
    def getthreshop(thresh):
        # format the 99.6th-percentile statistic into an fslmaths op string
        return ['-mul 2 -thr %.10f -bin' % (val) for val in thresh]
    # 99.6th percentile of the transformed ROI within the brain mask
    getthreshold = pe.MapNode(
            interface = fsl.ImageStats(op_string='-p 99.6'),
            iterfield = ['in_file','mask_file'],
            name = 'getthreshold')
    simple_workflow.connect(flirt_convert, 'out_file',
                            getthreshold, 'in_file')
    simple_workflow.connect(inputnode, 'mask',
                            getthreshold, 'mask_file')
    # threshold + binarize; the op_string is overwritten per-input via
    # the (getthreshold -> getthreshop) connection below
    threshold = pe.MapNode(
            interface = fsl.ImageMaths(
                    suffix = '_thresh',
                    op_string = '-mul 2 -bin'),
            iterfield = ['in_file','op_string'],
            name = 'thresholding')
    simple_workflow.connect(flirt_convert, 'out_file',
                            threshold, 'in_file')
    simple_workflow.connect(getthreshold,
                            ('out_stat',getthreshop),
                            threshold, 'op_string')
#    simple_workflow.connect(threshold,'out_file',outputnode,'BOLD_mask')
    # restrict the binarized ROI to the functional brain mask (-mas)
    bound_by_mask = pe.MapNode(
            interface = fsl.ImageMaths(
                    suffix = '_mask',
                    op_string = '-mas'),
            iterfield = ['in_file','in_file2'],
            name = 'bound_by_mask')
    simple_workflow.connect(threshold, 'out_file',
                            bound_by_mask, 'in_file')
    simple_workflow.connect(inputnode, 'mask',
                            bound_by_mask, 'in_file2')
    simple_workflow.connect(bound_by_mask, 'out_file',
                            outputnode, 'BOLD_mask')
    # setup inputspecs
    simple_workflow.inputs.inputspec.flt_in_file = roi
    simple_workflow.inputs.inputspec.flt_in_matrix = os.path.abspath(
            os.path.join(preprocessed_functional_dir,
                         'reg',
                         'highres2example_func.mat'))
    simple_workflow.inputs.inputspec.flt_reference = os.path.abspath(
            os.path.join(preprocessed_functional_dir,
                         'func',
                         'example_func.nii.gz'))
    simple_workflow.inputs.inputspec.mask = os.path.abspath(
            os.path.join(preprocessed_functional_dir,
                         'func',
                         'mask.nii.gz'))
    # final output name: <roi>_BOLD.nii.gz in output_dir
    simple_workflow.inputs.bound_by_mask.out_file = os.path.abspath(
            os.path.join(output_dir,
                         roi_name.replace('_fsl.nii.gz',
                                          '_BOLD.nii.gz')))
    return simple_workflow

def registration_plotting(output_dir,
                          anat_brain,
                          standard_brain):
    """Render FSL slicer/pngappend QC mosaics for the registration results.

    Reproduces the images of a FEAT reg report (example_func2highres,
    highres2standard, example_func2standard).  Requires python >= 3.6
    (f-strings) and the FSL tools at the hard-coded /opt/fsl path.
    """
    ######################
    ###### plotting ######
    try:
        example_func2highres = os.path.abspath(os.path.join(output_dir,
                                                            'example_func2highres'))
        example_func2standard = os.path.abspath(os.path.join(output_dir,
                                                             'example_func2standard_warp'))
        highres2standard = os.path.abspath(os.path.join(output_dir,
                                                        'highres2standard'))
        highres = os.path.abspath(anat_brain)
        standard = os.path.abspath(standard_brain)
        # example_func -> highres QC mosaic (newlines become spaces below)
        plot_example_func2highres = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2highres} {highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + 
slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres} {example_func2highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2highres}1.png - {example_func2highres}2.png {example_func2highres}.png;
/bin/rm -f sl?.png {example_func2highres}2.png
/bin/rm {example_func2highres}1.png
""".replace("\n"," ")
        # highres -> standard QC mosaic
        plot_highres2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {highres2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {highres2standard}1.png - {highres2standard}2.png {highres2standard}.png;
/bin/rm -f sl?.png {highres2standard}2.png
/bin/rm {highres2standard}1.png
""".replace("\n"," ")
        # example_func -> standard QC mosaic
        plot_example_func2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {example_func2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2standard}1.png - {example_func2standard}2.png {example_func2standard}.png;
/bin/rm -f sl?.png {example_func2standard}2.png
""".replace("\n"," ")
        # run the three mosaics sequentially through the shell
        for cmdline in [plot_example_func2highres,
                        plot_example_func2standard,
                        plot_highres2standard]:
            os.system(cmdline)
    # NOTE(review): bare except swallows every error (including missing FSL
    # binaries) and prints a misleading message — should be `except Exception:`
    # at minimum, and ideally log the real exception.
    except:
        print('you should not use python 2.7, update your python!!')

def create_highpass_filter_workflow(workflow_name = 'highpassfiler',
                                    HP_freq = 60,
                                    TR = 0.85):
    """Build a nipype workflow replicating FEAT's intensity normalisation
    and temporal high-pass filtering of an (ICA-cleaned) 4D BOLD series.

    HP_freq is the high-pass cutoff in seconds; TR the repetition time.
    """
    from nipype.workflows.fmri.fsl import preprocess
    from nipype.interfaces import fsl
    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as util
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # helper op-string formatters borrowed from nipype's FEAT preprocess
    getthreshop = preprocess.getthreshop
    getmeanscale = preprocess.getmeanscale
    highpass_workflow = pe.Workflow(name = workflow_name)
    inputnode = pe.Node(interface = util.IdentityInterface(
            fields = ['ICAed_file',]),
            name = 'inputspec')
    outputnode = pe.Node(interface = util.IdentityInterface(
            fields = ['filtered_file']),
            name = 'outputspec')
    # convert the input series to float, as FEAT does
    img2float = pe.MapNode(interface = fsl.ImageMaths(out_data_type = 'float',
                                                      op_string = '',
                                                      suffix = '_dtype'),
                           iterfield = ['in_file'],
                           name = 'img2float')
    highpass_workflow.connect(inputnode,'ICAed_file',
                              img2float,'in_file')
    # 2nd/98th percentile robust intensity range (FEAT convention)
    getthreshold = pe.MapNode(interface = fsl.ImageStats(op_string = '-p 2 -p 98'),
                              iterfield = ['in_file'],
                              name = 'getthreshold')
    highpass_workflow.connect(img2float, 'out_file',
                              getthreshold, 'in_file')
    # brain/background threshold mask; op_string replaced per input below
    thresholding = pe.MapNode(interface = fsl.ImageMaths(out_data_type = 'char',
                                                         suffix = '_thresh',
                                                         op_string = '-Tmin -bin'),
                              iterfield = ['in_file','op_string'],
                              name = 'thresholding')
    highpass_workflow.connect(img2float, 'out_file',
                              thresholding, 'in_file')
    highpass_workflow.connect(getthreshold,('out_stat',getthreshop),
                              thresholding,'op_string')
    # dilate the mask (-dilF) before applying it
    dilatemask = pe.MapNode(interface = fsl.ImageMaths(suffix = '_dil',
                                                       op_string = '-dilF'),
                            iterfield = ['in_file'],
                            name = 'dilatemask')
    highpass_workflow.connect(thresholding,'out_file',
                              dilatemask,'in_file')
    maskfunc = pe.MapNode(interface = fsl.ImageMaths(suffix = '_mask',
                                                     op_string = '-mas'),
                          iterfield = ['in_file','in_file2'],
                          name = 'apply_dilatemask')
    highpass_workflow.connect(img2float, 'out_file',
                              maskfunc, 'in_file')
    highpass_workflow.connect(dilatemask, 'out_file',
                              maskfunc, 'in_file2')
    # median intensity inside the (undilated) mask -> grand-mean scaling factor
    medianval = pe.MapNode(interface = fsl.ImageStats(op_string = '-k %s -p 50'),
                           iterfield = ['in_file','mask_file'],
                           name = 'cal_intensity_scale_factor')
    highpass_workflow.connect(img2float, 'out_file',
                              medianval, 'in_file')
    highpass_workflow.connect(thresholding, 'out_file',
                              medianval, 'mask_file')
    # grand-mean scale to 10000 (op_string supplied by getmeanscale)
    meanscale = pe.MapNode(interface = fsl.ImageMaths(suffix = '_intnorm'),
                           iterfield = ['in_file','op_string'],
                           name = 'meanscale')
    highpass_workflow.connect(maskfunc, 'out_file',
                              meanscale, 'in_file')
    highpass_workflow.connect(medianval, ('out_stat',getmeanscale),
                              meanscale, 'op_string')
    # temporal mean image, added back after filtering (bptf removes the mean)
    meanfunc = pe.MapNode(interface = fsl.ImageMaths(suffix = '_mean',
                                                     op_string = '-Tmean'),
                          iterfield = ['in_file'],
                          name = 'meanfunc')
    highpass_workflow.connect(meanscale, 'out_file',
                              meanfunc, 'in_file')
    # -bptf sigma (in volumes) = HP_freq / 2 / TR; -1 disables low-pass
    hpf = pe.MapNode(interface = fsl.ImageMaths(suffix = '_tempfilt',
                                                op_string = '-bptf %.10f -1' % (HP_freq/2/TR)),
                     iterfield = ['in_file'],
                     name = 'highpass_filering')
    highpass_workflow.connect(meanscale,'out_file',
                              hpf, 'in_file',)
    addMean = pe.MapNode(interface = fsl.BinaryMaths(operation = 'add'),
                         iterfield = ['in_file','operand_file'],
                         name = 'addmean')
    highpass_workflow.connect(hpf, 'out_file',
                              addMean, 'in_file')
    highpass_workflow.connect(meanfunc, 'out_file',
                              addMean, 'operand_file')
    highpass_workflow.connect(addMean, 'out_file',
                              outputnode,'filtered_file')
    return highpass_workflow

def load_csv(f,print_ = False):
    """Load one behavioral csv and tag it with session/run/trial ids.

    Session and run numbers are taken from the last two integers embedded
    in the file path; 'id' encodes session*1000 + run*100 + trial.
    """
    temp = re.findall(r'\d+',f)
    n_session = int(temp[-2])
    n_run = int(temp[-1])
    if print_:
        print(n_session,n_run)
    df = pd.read_csv(f)
    df['session'] = n_session
    df['run'] = n_run
    df['id'] = df['session'] * 1000 + df['run'] * 100 + df['trials']
    return df

def build_model_dictionary(print_train = False,
                           class_weight = 'balanced',
                           remove_invariant = True,
                           n_jobs = 1):
    """Return an OrderedDict of named (feature-selection + classifier)
    sklearn pipelines used for decoding.

    NOTE(review): `print_train` is currently unused.
    """
    np.random.seed(12345)
    svm = LinearSVC(penalty = 'l2', # default
                    dual = True, # default
                    tol = 1e-3, # not default
                    random_state = 12345, # not default
                    max_iter = int(1e3), # default
                    class_weight = class_weight, # not default
                    )
    # wrap the SVM so it exposes predict_proba via Platt scaling
    svm = CalibratedClassifierCV(base_estimator = svm,
                                 method = 'sigmoid',
                                 cv = 8)
    xgb = XGBClassifier(
                        learning_rate = 1e-3, # not default
                        max_depth = 10, # not default
                        n_estimators = 100, # not default
                        objective = 'binary:logistic', # default
                        booster = 'gbtree', # default
                        subsample = 0.9, # not default
                        colsample_bytree = 0.9, # not default
                        reg_alpha = 0, # default
                        reg_lambda = 1, # default
                        random_state = 12345, # not default
                        importance_type = 'gain', # default
                        n_jobs = n_jobs,# default to be 1
                        )
    bagging = BaggingClassifier(base_estimator = svm,
                                n_estimators = 30, # not default
                                max_features = 0.9, # not default
                                max_samples = 0.9, # not default
                                bootstrap = True, # default
                                bootstrap_features = True, # default
                                random_state = 12345, # not default
                                )
    # xgboost-importance-based feature selection
    RF = SelectFromModel(xgb,
                         prefit = False,
                         threshold = 'median'
                         # induce sparsity
                         )
    uni = SelectPercentile(mutual_info_classif,50) # so annoying that I cannot control the random state
    knn = KNeighborsClassifier()
    tree = DecisionTreeClassifier(random_state = 12345,
                                  class_weight = class_weight)
    dummy = DummyClassifier(strategy = 'uniform',random_state = 12345,)
    if remove_invariant:
        # drop zero-variance voxels before scaling
        RI = VarianceThreshold()
        models = OrderedDict([
                ['None + Dummy',                  make_pipeline(RI,MinMaxScaler(),
                                                                dummy,)],
                ['None + Linear-SVM',             make_pipeline(RI,MinMaxScaler(),
                                                                svm,)],
                ['None + Ensemble-SVMs',          make_pipeline(RI,MinMaxScaler(),
                                                                bagging,)],
                ['None + KNN',                    make_pipeline(RI,MinMaxScaler(),
                                                                knn,)],
                ['None + Tree',                   make_pipeline(RI,MinMaxScaler(),
                                                                tree,)],
                ['PCA + Dummy',                   make_pipeline(RI,MinMaxScaler(),
                                                                PCA(),
                                                                dummy,)],
                ['PCA + Linear-SVM',              make_pipeline(RI,MinMaxScaler(),
                                                                PCA(),
                                                                svm,)],
                ['PCA + Ensemble-SVMs',           make_pipeline(RI,MinMaxScaler(),
                                                                PCA(),
                                                                bagging,)],
                ['PCA + KNN',                     make_pipeline(RI,MinMaxScaler(),
                                                                PCA(),
                                                                knn,)],
                ['PCA + Tree',                    make_pipeline(RI,MinMaxScaler(),
                                                                PCA(),
                                                                tree,)],
                ['Mutual + Dummy',                make_pipeline(RI,MinMaxScaler(),
                                                                uni,
                                                                dummy,)],
                ['Mutual + Linear-SVM',           make_pipeline(RI,MinMaxScaler(),
                                                                uni,
                                                                svm,)],
                ['Mutual + Ensemble-SVMs',        make_pipeline(RI,MinMaxScaler(),
                                                                uni,
                                                                bagging,)],
                ['Mutual + KNN',                  make_pipeline(RI,MinMaxScaler(),
                                                                uni,
                                                                knn,)],
                ['Mutual + Tree',                 make_pipeline(RI,MinMaxScaler(),
                                                                uni,
                                                                tree,)],
                ['RandomForest + Dummy',          make_pipeline(RI,MinMaxScaler(),
                                                                RF,
                                                                dummy,)],
                ['RandomForest + Linear-SVM',     make_pipeline(RI,MinMaxScaler(),
                                                                RF,
                                                                svm,)],
                ['RandomForest + Ensemble-SVMs',  make_pipeline(RI,MinMaxScaler(),
                                                                RF,
                                                                bagging,)],
                ['RandomForest + KNN',            make_pipeline(RI,MinMaxScaler(),
                                                                RF,
                                                                knn,)],
                ['RandomForest + Tree',           make_pipeline(RI,MinMaxScaler(),
                                                                RF,
                                                                tree,)],]
                )
    else:
        # same grid of pipelines without the variance filter
        models = OrderedDict([
                ['None + Dummy',                  make_pipeline(MinMaxScaler(),
                                                                dummy,)],
                ['None + Linear-SVM',             make_pipeline(MinMaxScaler(),
                                                                svm,)],
                ['None + Ensemble-SVMs',          make_pipeline(MinMaxScaler(),
                                                                bagging,)],
                ['None + KNN',                    make_pipeline(MinMaxScaler(),
                                                                knn,)],
                ['None + Tree',                   make_pipeline(MinMaxScaler(),
                                                                tree,)],
                ['PCA + Dummy',                   make_pipeline(MinMaxScaler(),
                                                                PCA(),
                                                                dummy,)],
                ['PCA + Linear-SVM',              make_pipeline(MinMaxScaler(),
                                                                PCA(),
                                                                svm,)],
                ['PCA + Ensemble-SVMs',           make_pipeline(MinMaxScaler(),
                                                                PCA(),
                                                                bagging,)],
                ['PCA + KNN',                     make_pipeline(MinMaxScaler(),
                                                                PCA(),
                                                                knn,)],
                ['PCA + Tree',                    make_pipeline(MinMaxScaler(),
                                                                PCA(),
                                                                tree,)],
                ['Mutual + Dummy',                make_pipeline(MinMaxScaler(),
                                                                uni,
                                                                dummy,)],
                ['Mutual + Linear-SVM',           make_pipeline(MinMaxScaler(),
                                                                uni,
                                                                svm,)],
                ['Mutual + Ensemble-SVMs',        make_pipeline(MinMaxScaler(),
                                                                uni,
                                                                bagging,)],
                ['Mutual + KNN',                  make_pipeline(MinMaxScaler(),
                                                                uni,
                                                                knn,)],
                ['Mutual + Tree',                 make_pipeline(MinMaxScaler(),
                                                                uni,
                                                                tree,)],
                ['RandomForest + Dummy',          make_pipeline(MinMaxScaler(),
                                                                RF,
                                                                dummy,)],
                ['RandomForest + Linear-SVM',     make_pipeline(MinMaxScaler(),
                                                                RF,
                                                                svm,)],
                ['RandomForest + Ensemble-SVMs',  make_pipeline(MinMaxScaler(),
                                                                RF,
                                                                bagging,)],
                ['RandomForest + KNN',            make_pipeline(MinMaxScaler(),
                                                                RF,
                                                                knn,)],
                ['RandomForest + Tree',           make_pipeline(MinMaxScaler(),
                                                                RF,
                                                                tree,)],]
                )
    return models

def get_blocks(df__,label_map,):
    """Group trials into per-'id' blocks.

    Returns (blocks, block_labels): each block stacks ids, sessions, word
    labels, mapped targets and row indices for one unique trial id.
    """
    ids = df__['id'].values
    chunks = df__['session'].values
    words = df__['labels'].values
    labels = np.array([label_map[item] for item in df__['targets'].values])[:,-1]
    sample_indecies = np.arange(len(labels))
    blocks = [np.array([ids[ids == target],
                        chunks[ids == target],
                        words[ids == target],
                        labels[ids == target],
                        sample_indecies[ids == target]
                        ]) for target in np.unique(ids)
              ]
    # second-to-last row of each block holds the mapped label
    block_labels = np.array([np.unique(ll[-2]) for ll in blocks]).ravel()
    return blocks,block_labels

def make_unique_class_target(df_data):
    """Map each target category to the list of unique labels it contains."""
    make_class = {name:[] for name in pd.unique(df_data['targets'])}
    for ii,df_sub in df_data.groupby(['labels']):
        target = pd.unique(df_sub['targets'])
        label = pd.unique(df_sub['labels'])
        make_class[target[0]].append(label[0])
    return make_class

def Find_Optimal_Cutoff(target, predicted):
    """ Find the optimal probability cutoff point for a classification model related to event rate
Parameters ---------- target : Matrix with dependent or target data, where rows are observations predicted : Matrix with predicted data, where rows are observations Returns ------- list type, with optimal cutoff value """ fpr, tpr, threshold = roc_curve(target, predicted) i = np.arange(len(tpr)) roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)}) roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]] return list(roc_t['threshold']) def customized_partition(df_data,groupby_column = ['id','labels'],n_splits = 100,): """ modified for unaveraged volumes """ idx_object = dict(ids = [],idx = [],labels = []) for label,df_sub in df_data.groupby(groupby_column): idx_object['ids'].append(label[0]) idx_object['idx'].append(df_sub.index.tolist()) idx_object['labels'].append(label[-1]) df_object = pd.DataFrame(idx_object) idxs_test = [] for counter in range(int(1e4)): idx_test = [np.random.choice(item['idx'].values) for ii,item in df_object.groupby(groupby_column[-1])] if counter >= n_splits: return [np.concatenate(item) for item in idxs_test] break if counter > 0: temp = [] for used in idxs_test: used_temp = [','.join(str(ii) for ii in item) for item in used] idx_test_temp = [','.join(str(ii) for ii in item) for item in idx_test] a = set(used_temp) b = set(idx_test_temp) temp.append(len(a.intersection(b)) != len(idx_test)) if all(temp) == True: idxs_test.append(idx_test) else: idxs_test.append(idx_test) def check_train_test_splits(idxs_test): """ check if we get repeated test sets """ temp = [] for ii,item1 in enumerate(idxs_test): for jj,item2 in enumerate(idxs_test): if not ii == jj: if len(item1) == len(item2): sample1 = np.sort(item1) sample2 = np.sort(item2) temp.append(len(set(sample1).intersection(set(sample2))) == len(sample1)) temp = np.array(temp) return any(temp) def check_train_balance(df,idx_train,keys): """ check the balance of the training set. 
if only one of the classes has more 2 instances than the other we will randomly take out those 'extra instances' from the major class """ Counts = dict(Counter(df.iloc[idx_train]['targets'].values)) if np.abs(Counts[keys[0]] - Counts[keys[1]]) > 2: if Counts[keys[0]] > Counts[keys[1]]: key_major = keys[0] key_minor = keys[1] else: key_major = keys[1] key_minor = keys[0] ids_major = df.iloc[idx_train]['id'][df.iloc[idx_train]['targets'] == key_major] idx_train_new = idx_train.copy() for n in range(len(idx_train_new)): random_pick = np.random.choice(np.unique(ids_major),size = 1)[0] # print(random_pick,np.unique(ids_major)) idx_train_new = np.array([item for item,id_temp in zip(idx_train_new,df.iloc[idx_train_new]['id']) if (id_temp != random_pick)]) ids_major = np.array([item for item in ids_major if (item != random_pick)]) new_counts = dict(Counter(df.iloc[idx_train_new]['targets'])) if np.abs(new_counts[keys[0]] - new_counts[keys[1]]) > 3: if new_counts[keys[0]] > new_counts[keys[1]]: key_major = keys[0] key_minor = keys[1] else: key_major = keys[1] key_minor = keys[0] ids_major = df.iloc[idx_train_new]['id'][df.iloc[idx_train_new]['targets'] == key_major] elif np.abs(new_counts[keys[0]] - new_counts[keys[1]]) < 3: break return idx_train_new else: return idx_train def LOO_partition(df_data,target_column = 'labels'): temp = {'targets':[],target_column:[]} for (targets,labels),df_sub in df_data.groupby(['targets',target_column]): temp['targets'].append(targets) temp[target_column].append(labels) temp = pd.DataFrame(temp) temp = temp.sort_values(['targets',target_column]) living = temp[temp['targets'] == 'Living_Things'][target_column].values nonliving = temp[temp['targets'] == 'Nonliving_Things'][target_column].values test_pairs = [[a,b] for a in living for b in nonliving] idxs_train,idxs_test = [],[] for test_pair in test_pairs: idx_test = np.logical_or(df_data[target_column] == test_pair[0], df_data[target_column] == test_pair[1]) idx_train = np.invert(idx_test) 
idxs_train.append(np.where(idx_train == True)[0]) idxs_test.append(np.where(idx_test == True)[0]) return idxs_train,idxs_test def resample_ttest(x, baseline = 0.5, n_ps = 100, n_permutation = 10000, one_tail = False, n_jobs = 12, verbose = 0, full_size = True ): """ http://www.stat.ucla.edu/~rgould/110as02/bshypothesis.pdf https://www.tau.ac.il/~saharon/StatisticsSeminar_files/Hypothesis.pdf Inputs: ---------- x: numpy array vector, the data that is to be compared baseline: the single point that we compare the data with n_ps: number of p values we want to estimate one_tail: whether to perform one-tailed comparison """ import numpy as np import gc from joblib import Parallel,delayed # statistics with the original data distribution t_experiment = np.mean(x) null = x - np.mean(x) + baseline # shift the mean to the baseline but keep the distribution if null.shape[0] > int(1e4): # catch for big data full_size = False if not full_size: size = int(1e3) else: size = null.shape[0] gc.collect() def t_statistics(null,size,): """ null: shifted data distribution size: tuple of 2 integers (n_for_averaging,n_permutation) """ null_dist = np.random.choice(null,size = size,replace = True) t_null = np.mean(null_dist,0) if one_tail: return ((np.sum(t_null >= t_experiment)) + 1) / (size[1] + 1) else: return ((np.sum(np.abs(t_null) >= np.abs(t_experiment))) + 1) / (size[1] + 1) /2 ps = Parallel(n_jobs = n_jobs,verbose = verbose)(delayed(t_statistics)(**{ 'null':null, 'size':(size,int(n_permutation)),}) for i in range(n_ps)) return np.array(ps) def resample_ttest_2sample(a,b, n_ps = 100, n_permutation = 10000, one_tail = False, match_sample_size = True, n_jobs = 6, verbose = 0): from joblib import Parallel,delayed import gc # when the samples are dependent just simply test the pairwise difference against 0 # which is a one sample comparison problem if match_sample_size: difference = a - b ps = resample_ttest(difference, baseline = 0, n_ps = n_ps, n_permutation = n_permutation, one_tail = 
                                       one_tail,
                            n_jobs = n_jobs,
                            verbose = verbose,)
        return ps
    else: # when the samples are independent
        t_experiment = np.mean(a) - np.mean(b)
        if not one_tail:
            t_experiment = np.abs(t_experiment)
        def t_statistics(a,b):
            # pool, shuffle, and re-split to sample the null difference
            group = np.concatenate([a,b])
            np.random.shuffle(group)
            new_a = group[:a.shape[0]]
            new_b = group[a.shape[0]:]
            t_null = np.mean(new_a) - np.mean(new_b)
            if not one_tail:
                t_null = np.abs(t_null)
            return t_null
        gc.collect()
        ps = np.zeros(n_ps)
        for ii in range(n_ps):
            t_null_null = Parallel(n_jobs = n_jobs,verbose = verbose)(delayed(t_statistics)(**{
                            'a':a,
                            'b':b}) for i in range(n_permutation))
            if one_tail:
                ps[ii] = ((np.sum(t_null_null >= t_experiment)) + 1) / (n_permutation + 1)
            else:
                ps[ii] = ((np.sum(np.abs(t_null_null) >= np.abs(t_experiment))) + 1) / (n_permutation + 1) / 2
        return ps

class MCPConverter(object):
    # NOTE(review): class-level import precedes the string below, so the
    # string is NOT the class docstring (it is a no-op expression statement).
    import statsmodels as sms
    """
    https://gist.github.com/naturale0/3915e2def589553e91dce99e69d138cc
    https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method
    input: array of p-values.
    * convert p-value into adjusted p-value (or q-value)
    """
    def __init__(self, pvals, zscores = None):
        # keep both the raw and the sorted p-values (and z-scores if given)
        self.pvals = pvals
        self.zscores = zscores
        self.len = len(pvals)
        if zscores is not None:
            srted = np.array(sorted(zip(pvals.copy(), zscores.copy())))
            self.sorted_pvals = srted[:, 0]
            self.sorted_zscores = srted[:, 1]
        else:
            self.sorted_pvals = np.array(sorted(pvals.copy()))
        # permutation that sorts the original p-values
        self.order = sorted(range(len(pvals)), key=lambda x: pvals[x])
    def adjust(self, method = "holm"):
        import statsmodels as sms
        """
        methods = ["bonferroni", "holm", "bh", "lfdr"]
        (local FDR method needs 'statsmodels' package)
        """
        if method == "bonferroni":
            return [np.min([1, i]) for i in self.sorted_pvals * self.len]
        elif method == "holm":
            # step-down Holm-Bonferroni correction
            return [np.min([1, i]) for i in (self.sorted_pvals * (self.len - np.arange(1, self.len+1) + 1))]
        elif method == "bh":
            # Benjamini-Hochberg step-up FDR
            p_times_m_i = self.sorted_pvals * self.len / np.arange(1, self.len+1)
            return [np.min([p, p_times_m_i[i+1]]) if i < self.len-1 else p for i, p in enumerate(p_times_m_i)]
        elif method == "lfdr":
            if self.zscores is None:
                raise ValueError("Z-scores were not provided.")
            return sms.stats.multitest.local_fdr(abs(self.sorted_zscores))
        else:
            raise ValueError("invalid method entered: '{}'".format(method))
    def adjust_many(self, methods = ["bonferroni", "holm", "bh", "lfdr"]):
        # NOTE(review): mutable default argument — shared across calls if mutated
        if self.zscores is not None:
            df = pd.DataFrame(np.c_[self.sorted_pvals, self.sorted_zscores], columns=["p_values", "z_scores"])
            for method in methods:
                df[method] = self.adjust(method)
        else:
            df = pd.DataFrame(self.sorted_pvals, columns=["p_values"])
            for method in methods:
                if method != "lfdr":
                    df[method] = self.adjust(method)
        return df

def define_roi_category():
    """Map each ROI name to its broad functional category."""
    roi_dict = {'fusiform':'Visual',
                'parahippocampal':'Visual',
                'pericalcarine':'Visual',
                'precuneus':'Visual',
                'superiorparietal':'Working Memory',
                'inferiortemporal':'Visual',
                'lateraloccipital':'Visual',
                'lingual':'Visual',
                'rostralmiddlefrontal':'Working Memory',
                'superiorfrontal':'Working Memory',
                'ventrolateralPFC':'Working Memory',
                'inferiorparietal':'Visual',
                }
    return roi_dict

def stars(x):
    """Return a significance marker for p-value x."""
    if x < 0.001:
        return '***'
    elif x < 0.01:
        return '**'
    elif x < 0.05:
        return '*'
    else:
        return 'n.s.'

def get_fs(x):
    # model name convention is "<feature selector> + <classifier>"
    return x.split(' + ')[0]

def get_clf(x):
    # classifier half of "<feature selector> + <classifier>"
    return x.split(' + ')[1]

def rename_roi(x):
    # "side-...-name" -> "name-<second token>"
    return x.split('-')[-1] + '-' + x.split('-')[1]

def strip_interaction_names(df_corrected):
    """Split 'level1'/'level2' interaction labels into window + attributes."""
    results = []
    for ii,row in df_corrected.iterrows():
        row['window'] = row['level1'].split('_')[0]
        try:
            row['attribute1']= row['level1'].split('_')[1]
            row['attribute2']= row['level2'].split('_')[1]
        # NOTE(review): bare except; falls back to alternative column names
        except:
            row['attr1']= row['level1'].split('_')[1]
            row['attr2']= row['level2'].split('_')[1]
        results.append(row.to_frame().T)
    results = pd.concat(results)
    return results

def compute_xy(df_sub,position_map,hue_map):
    """Compute per-row x positions (x1/x2) for annotating paired bars."""
    df_add = []
    for ii,row in df_sub.iterrows():
        xtick = int(row['window']) - 1
        attribute1_x = xtick + position_map[hue_map[row['attribute1']]]
        attribute2_x = xtick + position_map[hue_map[row['attribute2']]]
        row['x1'] = attribute1_x
        row['x2'] = attribute2_x
        df_add.append(row.to_frame().T)
    df_add = pd.concat(df_add)
    return df_add

def split_probe_path(x,idx):
    """Return the idx-th component of a '/'-separated path."""
    temp = x.split('/')
    return temp[idx]

def standard_MNI_coordinate_for_plot():
    """MNI coordinates (x, y, z) per ROI for plotting; None when undefined."""
    return {'lh-fusiform':(-47,-52,-12),
            'rh-fusiform':(47,-51,-14),
            'lh-inferiorparietal':(-46,-60,33),
            'rh-inferiorparietal':(46,-59,31),
            'lh-inferiortemporal':(-47,-14,-34),
            'rh-inferiortemporal':(48,-17,-31),
            'lh-lateraloccipital':(-46,-58,-8),
            'rh-lateraloccipital':(40,-78,12),
            'lh-lingual':(-11,-81,7),
            'rh-lingual':(11,-78,9),
            'lh-rostralmiddlefrontal':(-30,50,24),
            'rh-rostralmiddlefrontal':(4,58,30),
            'lh-parahippocampal':(-25,-22,-22),
            'rh-parahippocampal':(27,-19,-25),
            'lh-pericalcarine':(-24,-66,8),
            'rh-pericalcarine':(26,-68,12),
            'lh-precuneus':None,
            'rh-precuneus':None,
            'lh-superiorfrontal':(-23,24,44),
            'rh-superiorfrontal':(22,26,45),
            'lh-superiorparietal':(-18,-61,55),
            'rh-superiorparietal':(27,-60,45),
            'lh-ventrolateralPFC':(-32,54,-4),
            'rh-ventrolateralPFC':(42,46,0)}

def bootstrap_behavioral_estimation(df_sub,n_bootstrap = int(1e2)):
    """Bootstrap the behavioral ROC-AUC and its shuffled-chance distribution.

    Returns (pvals, scores, chance); responses/answers are shifted from
    1-based keys to 0/1 by subtracting 1.
    """
    scores,chance = [],[]
    responses = df_sub['response.keys_raw'].values - 1
    answers = df_sub['correctAns_raw'].values - 1
    np.random.seed(12345)
    for n_ in tqdm(range(n_bootstrap)):
        idx = np.random.choice(np.arange(responses.shape[0]),
                               size = responses.shape[0],
                               replace = True)
        response_ = responses[idx]
        answer_ = answers[idx]
        score_ = roc_auc_score(answer_,response_)
        scores.append(score_)
    scores = np.array(scores)
    # chance
    # by keeping the answers in order but shuffle the response,
    # we can estimate the chance
    # level accuracy
    idx = np.random.choice(np.arange(responses.shape[0]),
                           size = responses.shape[0],
                           replace = True)
    response_ = responses[idx]
    answer_ = answers[idx]
    chance = np.array([roc_auc_score(answer_,shuffle(response_))\
                       for _ in tqdm(range(n_bootstrap))])
    pvals = resample_ttest_2sample(scores,chance,one_tail = True,
                                   match_sample_size = True)
    return pvals,scores,chance

def get_label_category_mapping():
    """Map each stimulus label to Living_Things / Nonliving_Things."""
    return {'Chest-of-drawers': 'Nonliving_Things',
            'armadillo': 'Living_Things',
            'armchair': 'Nonliving_Things',
            'axe': 'Nonliving_Things',
            'barn-owl': 'Living_Things',
            'bed': 'Nonliving_Things',
            'bedside-table': 'Nonliving_Things',
            'boat': 'Nonliving_Things',
            'bookcase': 'Nonliving_Things',
            'bus': 'Nonliving_Things',
            'butterfly': 'Living_Things',
            'car': 'Nonliving_Things',
            'castle': 'Nonliving_Things',
            'cat': 'Living_Things',
            'cathedral': 'Nonliving_Things',
            'chair': 'Nonliving_Things',
            'cheetah': 'Living_Things',
            'church': 'Nonliving_Things',
            'coking-pot': 'Nonliving_Things',
            'couch': 'Nonliving_Things',
            'cow': 'Living_Things',
            'crab': 'Living_Things',
            'cup': 'Nonliving_Things',
            'dolphin': 'Living_Things',
            'dragonfly': 'Living_Things',
            'drum': 'Nonliving_Things',
            'duck': 'Living_Things',
            'elephant': 'Living_Things',
            'factory': 'Nonliving_Things',
            'filling-cabinet': 'Nonliving_Things',
            'fondue': 'Nonliving_Things',
            'frying-pan': 'Nonliving_Things',
            'giraffe': 'Living_Things',
            'goldfinch': 
                    'Living_Things',
            'goose': 'Living_Things',
            'granary': 'Nonliving_Things',
            'guitar': 'Nonliving_Things',
            'hammer': 'Nonliving_Things',
            'hen': 'Living_Things',
            'hippopotamus': 'Living_Things',
            'horse': 'Living_Things',
            'house': 'Nonliving_Things',
            'hummingbird': 'Living_Things',
            'killer-whale': 'Living_Things',
            'kiwi': 'Living_Things',
            'ladybird': 'Living_Things',
            'lamp': 'Nonliving_Things',
            'lectern': 'Nonliving_Things',
            'lioness': 'Living_Things',
            'lobster': 'Living_Things',
            'lynx': 'Living_Things',
            'magpie': 'Living_Things',
            'manatee': 'Living_Things',
            'mill': 'Nonliving_Things',
            'motorbike': 'Nonliving_Things',
            'narwhal': 'Living_Things',
            'ostrich': 'Living_Things',
            'owl': 'Living_Things',
            'palace': 'Nonliving_Things',
            'partridge': 'Living_Things',
            'pelican': 'Living_Things',
            'penguin': 'Living_Things',
            'piano': 'Nonliving_Things',
            'pigeon': 'Living_Things',
            'plane': 'Nonliving_Things',
            'pomfret': 'Living_Things',
            'pot': 'Nonliving_Things',
            'raven': 'Living_Things',
            'rhino': 'Living_Things',
            'rocking-chair': 'Nonliving_Things',
            'rooster': 'Living_Things',
            'saucepan': 'Nonliving_Things',
            'saxophone': 'Nonliving_Things',
            'scorpion': 'Living_Things',
            'seagull': 'Living_Things',
            'shark': 'Living_Things',
            'ship': 'Nonliving_Things',
            'small-saucepan': 'Nonliving_Things',
            'sofa': 'Nonliving_Things',
            'sparrow': 'Living_Things',
            'sperm-whale': 'Living_Things',
            'table': 'Nonliving_Things',
            'tapir': 'Living_Things',
            'teapot': 'Nonliving_Things',
            'tiger': 'Living_Things',
            'toucan': 'Living_Things',
            'tractor': 'Nonliving_Things',
            'train': 'Nonliving_Things',
            'trumpet': 'Nonliving_Things',
            'tuba': 'Nonliving_Things',
            'turtle': 'Living_Things',
            'van': 'Nonliving_Things',
            'violin': 'Nonliving_Things',
            'wardrobe': 'Nonliving_Things',
            'whale': 'Living_Things',
            'zebra': 'Living_Things'}

def get_label_subcategory_mapping():
    """Map each stimulus label to its fine-grained subcategory."""
    return {'Chest-of-drawers': 'Furniture',
            'armadillo': 'Animals',
            'armchair': 'Furniture',
            'axe': 'Tools',
            'barn-owl': 'Birds',
            'bed': 'Furniture',
            'bedside-table': 'Furniture',
            'boat': 'Vehicles',
            'bookcase': 'Furniture',
            'bus': 'Vehicles',
            'butterfly': 'Insects',
            'car': 'Vehicles',
            'castle': 'Buildings',
            'cat': 'Animals',
            'cathedral': 'Buildings',
            'chair': 'Furniture',
            'cheetah': 'Animals',
            'church': 'Buildings',
            'coking-pot': 'Kitchen_Uten',
            'couch': 'Furniture',
            'cow': 'Animals',
            'crab': 'Marine_creatures',
            'cup': 'Kitchen_Uten',
            'dolphin': 'Marine_creatures',
            'dragonfly': 'Insects',
            'drum': 'Musical_Inst',
            'duck': 'Birds',
            'elephant': 'Animals',
            'factory': 'Buildings',
            'filling-cabinet': 'Furniture',
            'fondue': 'Kitchen_Uten',
            'frying-pan': 'Kitchen_Uten',
            'giraffe': 'Animals',
            'goldfinch': 'Birds',
            'goose': 'Birds',
            'granary': 'Buildings',
            'guitar': 'Musical_Inst',
            'hammer': 'Tools',
            'hen': 'Birds',
            'hippopotamus': 'Animals',
            'horse': 'Animals',
            'house': 'Buildings',
            'hummingbird': 'Birds',
            'killer-whale': 'Marine_creatures',
            'kiwi': 'Birds',
            'ladybird': 'Insects',
            'lamp': 'Furniture',
            'lectern': 'Furniture',
            'lioness': 'Animals',
            'lobster': 'Marine_creatures',
            'lynx': 'Animals',
            'magpie': 'Birds',
            'manatee': 'Marine_creatures',
            'mill': 'Buildings',
            'motorbike': 'Vehicles',
            'narwhal': 'Marine_creatures',
            'ostrich': 'Birds',
            'owl': 'Birds',
            'palace': 'Buildings',
            'partridge': 'Birds',
            'pelican': 'Birds',
            'penguin': 'Birds',
            'piano': 'Musical_Inst',
            'pigeon': 'Birds',
            'plane': 'Vehicles',
            'pomfret': 'Marine_creatures',
            'pot': 'Kitchen_Uten',
            'raven': 'Birds',
            'rhino': 'Animals',
            'rocking-chair': 'Furniture',
            'rooster': 'Birds',
            'saucepan': 'Kitchen_Uten',
            'saxophone': 'Musical_Inst',
            'scorpion': 'Insects',
            'seagull': 'Birds',
            'shark': 'Marine_creatures',
            'ship': 'Vehicles',
            'small-saucepan': 'Kitchen_Uten',
            'sofa': 'Furniture',
            'sparrow': 'Birds',
            'sperm-whale': 'Marine_creatures',
            'table': 'Furniture',
            'tapir': 'Animals',
            'teapot': 'Kitchen_Uten',
            'tiger': 'Animals',
            'toucan': 'Birds',
            'tractor': 'Vehicles',
            'train': 'Vehicles',
            'trumpet': 'Musical_Inst',
            'tuba': 'Musical_Inst',
            'turtle': 'Animals',
            'van': 
'Vehicles', 'violin': 'Musical_Inst', 'wardrobe': 'Furniture', 'whale': 'Marine_creatures', 'zebra': 'Animals'} def make_df_axis(df_data): label_category_map = get_label_category_mapping() label_subcategory_map = get_label_subcategory_mapping() df_axis = pd.DataFrame({'labels':pd.unique(df_data['labels'])}) df_axis['category'] = df_axis['labels'].map(label_category_map) df_axis['subcategory'] = df_axis['labels'].map(label_subcategory_map) df_axis = df_axis.sort_values(['category','subcategory','labels']) return df_axis def load_same_same(sub,target_folder = 'decoding',target_file = '*None*csv'): working_dir = '../../../../results/MRI/nilearn/{}/{}'.format(sub,target_folder) working_data = glob(os.path.join(working_dir,target_file)) df = pd.concat([pd.read_csv(f) for f in working_data]) if 'model_name' not in df.columns: df['model_name'] = df['model'] df['feature_selector'] = df['model_name'].apply(get_fs) df['estimator'] = df['model_name'].apply(get_clf) if 'score' in df.columns: df['roc_auc'] = df['score'] temp = np.array([item.split('-') for item in df['roi'].values]) df['roi_name'] = temp[:,1] df['side'] = temp[:,0] return df def plot_stat_map(stat_map_img, bg_img = '', cut_coords = None, output_file = None, display_mode = 'ortho', colorbar = True, figure = None, axes = None, title = None, threshold = 1e-6, annotate = True, draw_cross = True, black_bg = 'auto', cmap = cm.coolwarm, symmetric_cbar = "auto", dim = 'auto', vmin_ = None,vmax=None, resampling_interpolation = 'continuous', **kwargs): bg_img, black_bg, bg_vmin, bg_vmax = _load_anat( bg_img, dim = dim, black_bg = black_bg) stat_map_img = _utils.check_niimg_3d( stat_map_img, dtype = 'auto') cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( _safe_get_data( stat_map_img, ensure_finite = True), vmax, symmetric_cbar, kwargs) display = _plot_img_with_bg( img = stat_map_img, bg_img = bg_img, cut_coords = cut_coords, output_file = output_file, display_mode = display_mode, figure = figure, axes = 
axes, title = title, annotate = annotate, draw_cross = draw_cross, black_bg = black_bg, threshold = threshold, bg_vmin = bg_vmin, bg_vmax = bg_vmax, cmap = cmap, vmin = vmin_, vmax = vmax, colorbar = colorbar, cbar_vmin = vmin_, cbar_vmax = cbar_vmax, resampling_interpolation = resampling_interpolation, **kwargs) return display def cross_validation(feature_dir, encoding_model, custom_scorer, BOLD_sc_source, idxs_train_source, idxs_test_source, image_source, image_target,): """ Encoding pipeline """ from sklearn import linear_model from sklearn.decomposition import PCA from sklearn.model_selection import GridSearchCV,cross_validate from sklearn.pipeline import make_pipeline features_source = np.array([np.load(os.path.join(feature_dir, encoding_model, item)) for item in image_source]) features_target = np.array([np.load(os.path.join(feature_dir, encoding_model, item)) for item in image_target]) pca = PCA(n_components = .99,random_state = 12345) reg = linear_model.Ridge(normalize = True, alpha = 1, random_state = 12345) reg = GridSearchCV(make_pipeline(pca,reg), dict(ridge__alpha = np.logspace(1,12,12), ), scoring = custom_scorer, n_jobs = 1, cv = 5, # iid = False, ) res = cross_validate(reg, features_source, BOLD_sc_source, scoring = 'r2', cv = zip(idxs_train_source,idxs_test_source), return_estimator = True, n_jobs = -1, verbose = 1,) return res,features_target,features_source def fill_results(scores, results, n_splits, conscious_source, conscious_target, roi_name, BOLD_sc_source, features_source, corr,): mean_variance = scores.copy() mean_variance = np.array([item[item > 0].mean() for item in mean_variance]) positive_voxels = np.array([np.sum(temp > 0) for temp in scores]) positive_voxel_indices = [','.join(str(item) for item in np.where(row > 0.)[0]) for row in scores] scores_to_save = mean_variance.copy() scores_to_save = np.nan_to_num(scores_to_save,) results['mean_variance'] = scores.mean(1)#scores_to_save results['fold'] = np.arange(n_splits) + 1 
results['conscious_source'] = [conscious_source] * n_splits results['conscious_target'] = [conscious_target] * n_splits results['roi_name'] = [roi_name] * n_splits results['positive voxels' ]= positive_voxels results['n_parameters'] = [BOLD_sc_source.shape[1] * features_source.shape[1]] * n_splits results['corr'] = corr results['positive_voxel_indices'] = positive_voxel_indices return scores.mean(1),results ################################################################################### ################################################################################### import numpy as np import scipy.signal from scipy.stats import kurtosis from mne.preprocessing import find_outliers from numpy import nanmean from mne.utils import logger #from mne.preprocessing.eog import _get_eog_channel_index def hurst(x): """Estimate Hurst exponent on a timeseries. The estimation is based on the second order discrete derivative. Parameters ---------- x : 1D numpy array The timeseries to estimate the Hurst exponent for. Returns ------- h : float The estimation of the Hurst exponent for the given timeseries. """ y = np.cumsum(np.diff(x, axis=1), axis=1) b1 = [1, -2, 1] b2 = [1, 0, -2, 0, 1] # second order derivative y1 = scipy.signal.lfilter(b1, 1, y, axis=1) y1 = y1[:, len(b1) - 1:-1] # first values contain filter artifacts # wider second order derivative y2 = scipy.signal.lfilter(b2, 1, y, axis=1) y2 = y2[:, len(b2) - 1:-1] # first values contain filter artifacts s1 = np.mean(y1 ** 2, axis=1) s2 = np.mean(y2 ** 2, axis=1) return 0.5 *
np.log2(s2 / s1)
numpy.log2
import sys sys.path.append('../') import numpy as np import cv2 import math import os from ObstacleDetectionObjectives import numpy_iou class Obstacle(object): def __init__(self, x, y, w, h, depth_seg=None, obs_stats=None, conf_score=None, iou=None): self.x = int(x) #top self.y = int(y) #left self.w = int(w) self.h = int(h) self.valid_points = -1 #obstacle area self.max_iou = None self.multiple_detection_flag = False if depth_seg is not None: self.segmentation = depth_seg[1] self.depth_mean, self.depth_variance, self.valid_points = self.compute_depth_stats(depth_seg[0]) elif obs_stats is not None: self.segmentation = None self.depth_mean = obs_stats[0] self.depth_variance = obs_stats[1] if conf_score is not None: self.confidence = conf_score def compute_depth_stats(self, depth): if len(depth.shape) == 4: roi_depth = depth[0, self.y:self.y+self.h, self.x:self.x+self.w, 0] else: roi_depth = depth[self.y:self.y+self.h, self.x:self.x+self.w] mean_depth = 0 squared_sum = 0 valid_points = 0 for y in range(0, self.h): for x in range(0, self.w): if roi_depth[y,x] < 20 and roi_depth[y,x] > 0.0: mean_depth += roi_depth.item(y, x) squared_sum += roi_depth.item(y, x)**2 valid_points += 1 if valid_points > 0: mean_depth /= valid_points var_depth = (squared_sum / valid_points) - (mean_depth**2) else: mean_depth = -1 var_depth = -1 return mean_depth, var_depth, valid_points def evaluate_estimation(self, estimated_depth): estimated_mean, estimated_var, valid_points = self.compute_depth_stats(estimated_depth) mean_rmse = (self.depth_mean - estimated_mean)**2 mean_variance = (self.depth_variance - estimated_var)**2 return
np.sqrt(mean_rmse + 1e-6)
numpy.sqrt
# __author__ = '<NAME> in Academia Sinica IIS' import os, sys import os.path as osp import json import pickle import numpy as np import matplotlib import xml.etree.ElementTree as ET import argparse import platform import itertools import time import threading import matplotlib.pyplot as plt import torch from torchvision import transforms from pathlib import Path from tkinter import * from tkinter import ttk, filedialog from PIL import Image, ImageTk from libs.utils import apk, fig2img from libs.models import ResNet50 as model # from libs.models import ResNet18 as model matplotlib.use("Agg") means = [0.485, 0.456, 0.406] # imagenet stds = [0.229, 0.224, 0.225] # imagenet if platform.system() == 'Windows': splitter = '\\' elif platform.system() == 'Linux': splitter = '/' # ========================================================== parser = argparse.ArgumentParser(description="DetVisGUI") # dataset information parser.add_argument('--dataset', default='cub200', help='cars196 / cub200') parser.add_argument('--input_size', default=(224, 224), help='input image size') parser.add_argument('--ckpt', default='', help='model checkpoint path') parser.add_argument('--map', default='no_use', help='compute map and highlight list, no_use / compute / map path') parser.add_argument('--k_vals', nargs='+', default=[1,2,4,8], type=int, help='Recall @ Values.') parser.add_argument('--device', default='cuda', help='cpu / cuda') args = parser.parse_args() # ========================================================== class dataset: def __init__(self): self.dataset = args.dataset # self.img_root = Path('Datasets/{}/images'.format(args.dataset)) self.img_root = Path(osp.join('Datasets', args.dataset, 'images')) # self.features = np.load('features/feats_{}.npy'.format(args.dataset)) self.features = np.load(osp.join('features', 'feats_{}.npy'.format(args.dataset))) self.features = torch.Tensor(self.features).to(args.device) self.val_img_list = [] # val_img_list = 
np.load('features/names_{}.npy'.format(args.dataset)) val_img_list = np.load(osp.join('features', 'names_{}.npy'.format(args.dataset))) for x in val_img_list: x = str(Path(x)) splits = x.split(splitter) self.val_img_list.append(splits[3] + splitter + splits[4]) # get query image self.query_img_list = [] for x in sorted(os.listdir('query_images')): self.query_img_list.append(x) self.img_list = self.val_img_list self.query_root = 'query_images' def get_img_by_name(self, name): if self.is_query(name): img = Image.open(osp.join(self.query_root, Path(name))).convert('RGB') else: img = Image.open(osp.join(self.img_root, Path(name))).convert('RGB') return img def get_img_by_index(self, idx): return self.get_img_by_name(self.img_list[idx]) def get_feat_by_name(self, name): idx = np.where(np.asarray(self.img_list) == name)[0] return self.features[idx] def get_feat_by_idx(self, idx): return self.features[idx] def is_query(self, name): return False if name in self.val_img_list else True def switch_img_list(self): if self.img_list == self.val_img_list: self.img_list = self.query_img_list elif self.img_list == self.query_img_list: self.img_list = self.val_img_list return self.img_list def get_img_by_names_mt(self, top_idx, img_dict): t_list = [] self.img_dict = img_dict names = np.asarray(self.val_img_list)[top_idx] def get_img(idx, name, img_dict): img = Image.open(osp.join(self.img_root, name)).convert('RGB') img_dict[idx] = img for i in range(len(names)): t_list.append(threading.Thread(target=get_img, args=(top_idx[i], names[i], img_dict))) t_list[i].start() for i in t_list: i.join() def is_CARS196(self): return True if self.dataset == 'cars196' else False # main GUI class vis_tool: def __init__(self): self.data_info = dataset() self.window = Tk() self.menubar = Menu(self.window) self.listBox1 = Listbox(self.window, width=50, height=37 if self.data_info.is_CARS196() else 28, font=('Times New Roman', 10)) self.scrollbar1 = Scrollbar(self.window, width=15, orient="vertical") 
self.listBox1_info = StringVar() self.listBox1_label = Label(self.window, font=('Arial', 11), bg='yellow', width=4, height=1, textvariable=self.listBox1_info) self.frame1 = ttk.Notebook(self.window) self.tab_pred = Frame(self.frame1) self.tab_ans = Frame(self.frame1) self.tab_rank = Frame(self.frame1) self.frame1.add(self.tab_pred, text="Top 20", compound=TOP) self.frame1.add(self.tab_ans, text="Answer", compound=TOP) self.frame1.add(self.tab_rank, text="RankingMap", compound=TOP) self.eval_label = Label(self.tab_pred, font=('Arial', 11), bg='yellow', width=120) self.title_label_ans = Button(self.tab_ans, cursor="hand1", font=('Arial', 11), bg='yellow', width=120) # load image and show it on the window self.img = self.data_info.get_img_by_index(0) self.label_img1 = Label(self.window, height=300, width=300, highlightthickness=4, highlightbackground='#1f77b4') # query image self.label_img2 = Label(self.window) # ranking bar self.label_img3 = Label(self.tab_rank) # rank image self.title_label_ranking1 = Label(self.tab_rank, font=('Arial', 11), bg='yellow', width=120) self.title_label_ranking2 = Label(self.tab_rank, font=('Arial', 11), bg='#D9D9D9', width=120) self.panel = Frame(self.tab_rank) self.label_img4 = Label(self.panel, height=300, width=300) # select image self.label_img5 = Label(self.panel) # feature vectors self.model = model() if osp.exists(args.ckpt): print('Loading checkpoints from {} ...'.format(args.ckpt)) state_dict = torch.load(args.ckpt)['state_dict'] self.model.load_state_dict(state_dict) print('Done') elif args.ckpt != '': print('Not found checkpoint in {}!'.format(args.ckpt)) sys.exit() self.model = self.model.to(args.device) self.model.eval() # --------------------------------------------- self.box_num = 20 self.label_img_list = [Label(self.tab_pred) for _ in range(self.box_num)] self.photo_list = [ImageTk.PhotoImage(self.img) for _ in range(self.box_num)] self.label_list = [Label(self.tab_pred, font=('Arial', 11), bg='yellow', width=10, 
height=1) for _ in range(self.box_num)] self.label_cls_list = [Label(self.tab_pred, font=('Arial', 8), bg='#fdfd96', width=10, height=1) for _ in range(self.box_num)] self.ans_label_img_list = [Label(self.tab_ans) for _ in range(self.box_num)] self.ans_photo_list = [ImageTk.PhotoImage(self.img) for _ in range(self.box_num)] self.ans_label_list = [Label(self.tab_ans, font=('Arial', 11), bg='yellow', width=10, height=1) for _ in range(self.box_num)] # --------------------------------------------- self.find_name = "" self.find_label = Label(self.window, font=('Arial', 11), bg='yellow', width=10, height=1, text="find") self.find_entry = Entry(self.window, font=('Arial', 11), textvariable=StringVar(self.window, value=str(self.find_name)), width=10) self.find_button = Button(self.window, text='Enter', height=1, command=self.findname) self.listBox1_idx = 0 # image listBox # ====== ohter attribute ====== self.img_name = '' self.keep_aspect_ratio = False self.img_list = self.data_info.img_list self.transform = transforms.Compose([transforms.Resize(size=args.input_size), transforms.ToTensor(), transforms.Normalize(means, stds)]) def findname(self, event=None): self.find_name = self.find_entry.get() new_list = [] if self.find_name == '': new_list = self.data_info.img_list else: for img_name in self.data_info.img_list: if self.find_name[0] == "!": if self.find_name[1:] not in img_name: new_list.append(img_name) else: if self.find_name in img_name: new_list.append(img_name) if len(new_list) != 0: self.img_list = new_list self.clear_add_listBox1() else: self.window.title("Can't find any image about '{}'".format(self.find_name)) def clear_add_listBox1(self): self.listBox1.delete(0, 'end') # delete listBox1 0 ~ end items # add image name to listBox1 for item in self.img_list: self.listBox1.insert('end', item) self.listBox1.select_set(0) self.listBox1.focus() self.change_img() def extract_feature(self, name): if self.data_info.is_query(name): path = 
osp.join(self.data_info.query_root, name) else: path = osp.join(self.data_info.img_root, name) # open image img = Image.open(path).convert('RGB') img = self.transform(img).unsqueeze(0) with torch.no_grad(): img = img.to(args.device) feature = self.model(img).view(1, -1) return feature def compute_map(self): # compute similarity simmat = torch.mm(self.data_info.features, self.data_info.features.T).squeeze() simmat_rank = simmat.argsort(1, descending=True).cpu().numpy() if args.map == 'no_use': pass elif os.path.exists(args.map): aps = np.load(args.map) elif args.map == 'compute': print('Compute mAP ... (need a little time)') t1 = time.time() # compute map, cub200: 57s, cars196: 130s aps = [] for idx, name in enumerate(self.data_info.val_img_list): ans = [] # find answer names for img_name in self.data_info.val_img_list: if name.split(splitter)[0] in img_name: ans.append(img_name) # compute similarity pred = np.asarray(self.data_info.val_img_list)[simmat_rank[idx]] ap = apk(ans, pred[1:], len(pred)) aps.append(ap) np.save('{}_aps.npy'.format(args.dataset), aps) print('mAP spend time: {:.2} s'.format(time.time() - t1)) if 'aps' in locals(): print('map : {:7.4}%'.format(np.mean(aps) * 100)) for i in range(self.listBox1.size()): get_color = lambda r,g,b: '#%02x%02x%02x' % (r, g, b) if aps[i] <= 0.5: color = (np.asarray([255,0,0]) * (1-2*aps[i])).astype(np.uint8) else: color = (np.asarray([0,0,255]) * 2*(aps[i]-0.5)).astype(np.uint8) color = get_color(*color) self.listBox1.itemconfig(i, fg=color) def change_img(self, event=None): self.title_label_ranking2['bg'] = '#D9D9D9' self.title_label_ranking2['text'] = '' self.label_img4.config(image='') self.label_img4.config(highlightthickness=0) self.label_img5.config(image='') if len(self.listBox1.curselection()) != 0: self.listBox1_idx = self.listBox1.curselection()[0] self.listBox1_info.set("Image {:6} / {:6}".format(self.listBox1_idx + 1, self.listBox1.size())) self.img_name = name = self.listBox1.get(self.listBox1_idx) 
self.window.title("DATASET : " + self.data_info.dataset + ' ' + name) self.photo = ImageTk.PhotoImage(self.scale_img(self.data_info.get_img_by_name(name), keep_aspect_ratio=self.keep_aspect_ratio)) self.label_img1.config(image=self.photo) # --------------------------------------------- self.feat = self.extract_feature(name) # compute similarity simmat = torch.mm(self.feat, self.data_info.features.T).squeeze() simmat_rank = torch.argsort(simmat, descending=True) # ranking self.simmat_rank = simmat_rank = simmat_rank.cpu().numpy() self.simmat = simmat = simmat.cpu().numpy() offset = 0 if self.data_info.is_query(name) else 1 # not self matching top_rank_idx = simmat_rank[offset:self.box_num+offset] top_score = simmat[top_rank_idx] ans = [] ans_score = [] # find answer names if not self.data_info.is_query(name): for idx in range(len(self.data_info.features)): img_name = self.data_info.val_img_list[idx] if name.split(splitter)[0] in img_name: ans.append(img_name) ans_score.append(simmat[idx]) # show top-20 matching images if self.frame1.index(self.frame1.select()) == 0: # open images by multithreading img_dict = dict() self.data_info.get_img_by_names_mt(top_rank_idx, img_dict) for i in range(self.box_num): if len(ans) == 0: self.label_list[i].config(bg='#fdfd96') # Pastel yellow self.frame1.tab(1, state="disabled") elif self.data_info.val_img_list[top_rank_idx[i]] in ans: self.label_list[i].config(bg='#00ff00') self.frame1.tab(1, state="normal") else: self.label_list[i].config(bg='#fd9696') # very soft red self.frame1.tab(1, state="normal") self.label_list[i]['text'] = '{:2} ({:5.4})'.format(i+1, top_score[i]) self.label_cls_list[i]['text'] = '{}'.format(self.data_info.val_img_list[top_rank_idx[i]].split(splitter)[0]) # img = self.data_info.get_img_by_name(self.data_info.val_img_list[top_rank_idx[i]]) img = img_dict[top_rank_idx[i]] img = self.scale_img(img, fix_size=160, keep_aspect_ratio=self.keep_aspect_ratio) self.photo_list[i] = ImageTk.PhotoImage(img) 
self.label_img_list[i].config(image=self.photo_list[i]) # show answer images and scores if self.frame1.index(self.frame1.select()) == 1: idx = np.argsort(ans_score)[::-1] ans = np.asarray(ans)[idx] ans_score = np.asarray(ans_score)[idx] self.ans, self.ans_score = ans, ans_score ans_idx = [int(np.where(np.asarray(self.data_info.val_img_list) == a)[0]) for a in ans[:self.box_num]] # open images by multithreading img_dict = dict() self.data_info.get_img_by_names_mt(ans_idx, img_dict) for i in range(self.box_num): if i >= len(ans): self.ans_label_img_list[i].config(image='') self.ans_label_list[i]['bg'] = '#D9D9D9' self.ans_label_list[i]['text'] = '' else: idx = ans_idx[i] # img = self.data_info.get_img_by_name(self.data_info.val_img_list[idx]) img = img_dict[ans_idx[i]] img = self.scale_img(img, fix_size=160, keep_aspect_ratio=self.keep_aspect_ratio) self.ans_photo_list[i] = ImageTk.PhotoImage(img) self.ans_label_img_list[i].config(image=self.ans_photo_list[i]) self.ans_label_list[i]['bg'] = 'yellow' self.ans_label_list[i]['text'] = str(np.round(simmat[idx], 3)) self.title_label_ans['text'] = '{} ~ {} / {}'.format(0, min(self.box_num, len(ans)), len(ans)) self.cur_ans_idx = 0 if self.box_num + 1 >= len(self.ans) else self.box_num top_pred = [x for x in np.asarray(self.data_info.val_img_list)[top_rank_idx]] # plot rank bar band = 5 bar_len = 1250 pred = np.asarray(self.data_info.val_img_list)[simmat_rank] self.bar_img = np.ones((band, bar_len, 3)) * [253, 150, 150] self.bar_img = self.bar_img.astype(np.uint8) for a in ans: idx = int(np.where(pred == a)[0]) - offset if band*(idx + 1) <= bar_len and idx >=0 : self.bar_img[:, band*idx:band*(idx+1)] = [0, 255, 0] self.photo2 = ImageTk.PhotoImage(Image.fromarray(self.bar_img)) self.label_img2.config(image=self.photo2) ap = apk(ans, pred[1:], len(pred)) recall_all_k = [] for k in args.k_vals: recall_at_k = 1 if bool(set(pred[1:k+1]) & set(ans)) else 0 recall_all_k.append(recall_at_k) self.title_label_ranking1['text'] = 'AP : 
{:6.4} | Recall@{}: {} | Recall@{}: {} | Recall@{}: {} | Recall@{}: {}'.format(ap, args.k_vals[0], recall_all_k[0], args.k_vals[1], recall_all_k[1], args.k_vals[2], recall_all_k[2], args.k_vals[3], recall_all_k[3]) self.eval_label['text'] = '{:^4} / {:^4} in top 20 (AP : {:6.4})'.format(len(set(top_pred) & set(ans)), len(ans), ap) # plot rank image if self.frame1.index(self.frame1.select()) == 2: band = 5 bar_len = 600 max_row_num = 68 doc_len = len(self.data_info.val_img_list) col_num = int(bar_len / band) # 600 / 5 = 120 row_num = min(int(np.ceil(doc_len / col_num)), max_row_num) img_height = row_num * band + (row_num+1) * 2 img_width = bar_len + (col_num+1) * 2 self.rank_img = np.ones((img_height, img_width, 3)) * [253, 150, 150] self.rank_img = self.rank_img.astype(np.uint8) bg_color = [200, 200, 200] if row_num == int(np.ceil(doc_len / col_num)): # tail region last_col_idx = (doc_len % col_num) - offset self.rank_img[img_height-band-2:img_height, band*last_col_idx+last_col_idx*2:] = bg_color # row grid for i in range(0, img_height, band+2): self.rank_img[i:i+2, :] = bg_color # column grid for i in range(0, img_width, band+2): self.rank_img[:, i:i+2] = bg_color # answer block for a in ans: idx = int(
np.where(pred == a)
numpy.where
# encoding=utf8 """ Functions for performing classical hypothesis testing. Hypothesis Testing ------------------ .. autosummary:: :toctree: generated/ BinomialTest ChiSquareTest tTest References ---------- <NAME>., & <NAME>. (2012). Methods of multivariate analysis (3rd Edition). <NAME>. (1956). Nonparametric statistics: For the behavioral sciences. McGraw-Hill. ISBN 07-057348-4 Student's t-test. (2017, June 20). In Wikipedia, The Free Encyclopedia. From https://en.wikipedia.org/w/index.php?title=Student%27s_t-test&oldid=786562367 <NAME>. "Chi-Squared Test." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/Chi-SquaredTest.html Wikipedia contributors. (2018, July 14). Binomial proportion confidence interval. In Wikipedia, The Free Encyclopedia. Retrieved 15:03, August 10, 2018, from https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval&oldid=850256725 Wikipedia contributors. (2018, July 5). Chi-squared test. In Wikipedia, The Free Encyclopedia. Retrieved 13:56, August 19, 2018, from https://en.wikipedia.org/w/index.php?title=Chi-squared_test&oldid=848986171 Wikipedia contributors. (2018, April 12). Pearson's chi-squared test. In Wikipedia, The Free Encyclopedia. Retrieved 12:55, August 23, 2018, from https://en.wikipedia.org/w/index.php?title=Pearson%27s_chi-squared_test&oldid=836064929 """ import numpy as np import numpy_indexed as npi from scipy.stats import beta, norm, t from scipy.special import comb class BinomialTest(object): r""" Performs a one-sample binomial test. Parameters ---------- x : int Number of successes out of :math:`n` trials. n : int Number of trials p : float, optional Expected probability of success alternative: str, {'two-sided', 'greater', 'lesser'}, optional Specifies the alternative hypothesis :math:`H_1`. Must be one of 'two-sided' (default), 'greater', or 'less'. 
alpha : float, optional Significance level continuity: bool, optional If True, the continuity corrected version of the Wilson score interval is used. Attributes ---------- x : int Number of successes out of :math:`n` trials. n : int Number of trials p : float Expected probability of success q : float Defined as :math:`1 - p` alternative : str Specifies the alternative hypothesis :math:`H_1`. Must be one of 'two-sided' (default), 'greater', or 'less'. alpha : float Significance level continuity : bool If True, the continuity corrected version of the Wilson score interval is used. p_value : float Computed p-value z : float z-score used in computation of intervals clopper_pearson_interval : dict Dictionary of the Clopper-Pearson lower and upper intervals and probability of success. wilson_score_interval : dict Dictionary of the Wilson Score lower and upper intervals and probability of success. agresti_coull_interval : dict Dictionary of the Agresti-Coull lower and upper intervals and probability of success. arcsine_transform_interval : dict Dictionary of the arcsine transformation lower and upper intervals and probability of success. test_summary : dict Dictionary containing test summary statistics. Raises ------ ValueError If number of successes :math:`x` is greater than the number of trials :math:`n`. ValueError If expected probability :math:`p` is greater than 1. ValueError If parameter :code:`alternative` is not one of {'two-sided', 'greater', 'lesser'} Notes ----- The Binomial test is a one-sample test applicable in the case of populations consisting of two classes or groups, such as male/female, cat/dog, etc. The proportion of the first group is denoted :math:`p`, while the second group is often denoted :math:`q`, which we know to be :math:`1 - p`. 
The null hypothesis of the test is that the proportion of the population is indeed :math:`p` and gives the researcher more information to determine if the random sample that was drawn could have come from a population having a proportion of :math:`p`. As the name of the test implies, the binomial distribution is the sampling distribution the of the proportions that could be observed when drawing random samples from a population. Therefore, the probability of obtaining :math:`x` objects in one category and :math:`n - x` in the other category out of a total :math:`n` trials is given by the binomial distribution probability mass function: .. math:: p(x) = \binom{n}{x} P^x (1 - P)^{n - x} :math:`(1 - P)` may be substituted for :math:`Q`. The binomial coefficient :math:`\binom{n}{x}` is defined as: .. math:: \binom{n}{x} = \frac{n!}{k!(n - k)!} The p-value of the test is calculated by the binomial distribution's cumulative distribution function, defined as: .. math:: Pr(X \leq x) = \sum^{[k]}_{i=0} \binom{n}{i} P^i (1 - P)^{n - i} There are several confidence intervals that can be computed when performing a binomial test. The most common is known as the Clopper-Pearson interval, which is an exact interval as it is based on the binomial distribution. The Clopper-Pearson interval can be defined several ways, one of which uses the relationship between the binomial distribution nad the beta distribution. .. math:: B\left(\frac{\alpha}{2};x,n-x+1\right) < \theta < B\left(1 - \frac{\alpha}{2};x + 1, n - x \right) The Agresti-Coull interval utilizes the standard normal distribution. :math:`z` is given as :math:`1 - \frac{\alpha}{2}`. The interval calculation proceeds as: With :math:`x` successes out of a total :math:`n` trials, we define :math:`\tilde{n}` as: .. math:: `\tilde{n} = n + z^2 and, .. math:: \tilde{p} = \frac{1}{\tilde{n}} \left(x + \frac{z^2}{2} \right) The confidence interval for the probability of success, :math:`p`, is then given as: .. 
math:: \tilde{p} \pm z \sqrt{\frac{\tilde{p}}{\tilde{n}} (1 - \tilde{p})} The arcsine transformation confidence interval is defined as: .. math:: sin^2 \left(\arcsin{\sqrt{p}} - \frac{z}{2\sqrt{n}} \right) < \theta < sin^2 \left(arcsin{\sqrt{p}} + \frac{z}{2\sqrt{n}} \right) Where :math:`z` is the quantile :math:`1 - \frac{\alpha}{2}}` of the standard normal distribution, as before. Lastly, the Wilson score interval can be computed with or without continuity correction. Without correction, the Wilson score interval success proability :math:`p` is defined as: .. math:: \frac{\hat{p} + \frac{z^2}{2n}}{1 + \frac{z^2}{n} \pm \frac{z}{1 + \frac{z^2}{n}} \sqrt{\frac{\hat{p} (1 - \hat{p}}{n}}{1 + \frac{z^2}{n}}} The Wilson score interval with continuity correction is defined as: .. math:: w^- = max \Bigg\{0, \frac{2n\hat{P} + z^2 - \Big[z \sqrt{z^2 - \frac{1}{n} + 4n\hat{p}(1 - \hat{p}) + (4\hat{p} - 2) + 1}\Big]}{2(n + z^2)}\Bigg\} w^+ = min \Bigg\{1, \frac{2n\hat{P} + z^2 + \Big[z \sqrt{z^2 - \frac{1}{n} + 4n\hat{p}(1 - \hat{p}) - (4\hat{p} - 2) + 1}\Big]}{2(n + z^2)}\Bigg\} Where :math:`w^-` and :math:`w^+` are the lower and upper bounds of the Wilson score interval corrected for contiunity. 
Examples -------- >>> x = 682 >>> n = 925 >>> bt = BinomialTest(n, x) >>> bt.test_summary {'Number of Successes': 682, 'Number of Trials': 925, 'alpha': 0.05, 'intervals': {'Agresti-Coull': {'conf level': 0.95, 'interval': (0.7079790581519885, 0.7646527304391209), 'probability of success': 0.7363158942955547}, 'Arcsine Transform': {'conf level': 0.95, 'interval': (0.708462749220724, 0.7651467076803447), 'probability of success': 0.7372972972972973, 'probability variance': 0.00020939458669772768}, 'Clopper-Pearson': {'conf level': 0.95, 'interval': (0.7076682640790369, 0.7654065582415227), 'probability of success': 0.7372972972972973}, 'Wilson Score': {'conf level': 0.95, 'interval': (0.46782780413153596, 0.5321721958684641), 'probability of success': 0.5}}, 'p-value': 2.4913404672588513e-13} >>> bt.p_value 2.4913404672588513e-13 >>> bt.clopper_pearson_interval {'conf level': 0.95, 'interval': (0.7076682640790369, 0.7654065582415227), 'probability of success': 0.7372972972972973} >>> bt2 = BinomialTest(n, x, alternative='greater') >>> bt2.p_value 1.2569330927920093e-49 >>> bt2.clopper_pearson_interval {'conf level': 0.95, 'interval': (0.7124129244365457, 1.0), 'probability of success': 0.7372972972972973} References ---------- <NAME>. (1956). Nonparametric statistics: For the behavioral sciences. McGraw-Hill. ISBN 07-057348-4 Wikipedia contributors. (2018, July 14). Binomial proportion confidence interval. In Wikipedia, The Free Encyclopedia. 
Retrieved 15:03, August 10, 2018, from https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval&oldid=850256725 """ def __init__(self, n, x, p=0.5, alternative='two-sided', alpha=0.05, continuity=True): if x > n: raise ValueError('number of successes cannot be greater than number of trials.') if p > 1.0: raise ValueError('expected probability of success cannot be greater than 1.') if alternative not in ('two-sided', 'greater', 'less'): raise ValueError("'alternative must be one of 'two-sided' (default), 'greater', or 'less'.") self.n = n self.x = x self.p = p self.q = 1.0 - self.p self.alpha = alpha self.alternative = alternative self.continuity = continuity self.p_value = self._p_value() if self.alternative == 'greater': self.z = norm.ppf(self.alpha) elif self.alternative == 'less': self.z = norm.ppf(1 - self.alpha) else: self.z = norm.ppf(1 - self.alpha / 2) self.clopper_pearson_interval = self._clopper_pearson_interval() self.wilson_score_interval = self._wilson_score_interval() self.agresti_coull_interval = self._agresti_coull_interval() self.arcsine_transform_interval = self._arcsine_transform_interval() self.test_summary = { 'Number of Successes': self.x, 'Number of Trials': self.n, 'p-value': self.p_value, 'alpha': self.alpha, 'intervals': { 'Clopper-Pearson': self.clopper_pearson_interval, 'Wilson Score': self.wilson_score_interval, 'Agresti-Coull': self.agresti_coull_interval, 'Arcsine Transform': self.arcsine_transform_interval } } def _p_value(self): r""" Calculates the p-value of the binomial test. Returns ------- pval : float The computed p-value. 
""" successes = np.arange(self.x + 1) pval = np.sum(comb(self.n, successes) * self.p ** successes * self.q ** (self.n - successes)) if self.alternative in ('two-sided', 'greater'): other_tail = np.arange(self.x, self.n + 1) y = comb(self.n, self.x) * (self.p ** self.x) * self.q ** (self.n - self.x) p_othertail = comb(self.n, other_tail) * self.p ** other_tail * self.q ** (self.n - other_tail) p_othertail = np.sum(p_othertail[p_othertail <= y]) if self.alternative == 'two-sided': pval = p_othertail * 2 #pval = 1 - pval elif self.alternative == 'greater': pval = p_othertail return pval def _clopper_pearson_interval(self): r""" Computes the Clopper-Pearson 'exact' confidence interval. References ---------- Wikipedia contributors. (2018, July 14). Binomial proportion confidence interval. In Wikipedia, The Free Encyclopedia. Retrieved 00:40, August 15, 2018, from https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval&oldid=850256725 """ p = self.x / self.n if self.alternative == 'less': lower_bound = 0.0 upper_bound = beta.ppf(1 - self.alpha, self.x + 1, self.n - self.x) elif self.alternative == 'greater': upper_bound = 1.0 lower_bound = beta.ppf(self.alpha, self.x, self.n - self.x + 1) else: lower_bound = beta.ppf(self.alpha / 2, self.x, self.n - self.x + 1) upper_bound = beta.ppf(1 - self.alpha / 2, self.x + 1, self.n - self.x) clopper_pearson_interval = { 'probability of success': p, 'conf level': 1 - self.alpha, 'interval': (lower_bound, upper_bound) } return clopper_pearson_interval def _wilson_score_interval(self): r""" Computes the Wilson score confidence interval. References ---------- Wikipedia contributors. (2018, July 14). Binomial proportion confidence interval. In Wikipedia, The Free Encyclopedia. Retrieved 00:40, August 15, 2018, from https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval&oldid=850256725 """ p = (self.p + (self.z ** 2 / (2. * self.n))) / (1. + (self.z ** 2. 
/ self.n)) if self.continuity: if self.alternative == 'less': lower = 0.0 else: lower = (2. * self.n * self.p + self.z ** 2. - (self.z * np.sqrt( self.z ** 2. - (1. / self.n) + 4. * self.n * self.p * self.q + (4. * self.p - 2.) + 1.))) / \ (2. * (self.n + self.z ** 2.)) if self.alternative == 'greater': upper = 1.0 else: upper = (2. * self.n * self.p + self.z ** 2. + (self.z * np.sqrt( self.z ** 2. - (1. / self.n) + 4. * self.n * self.p * self.q + (4. * self.p - 2.) + 1))) / (2. * ( self.n + self.z ** 2.)) upper_bound, lower_bound = np.minimum(1.0, upper),
np.maximum(0.0, lower)
numpy.maximum
""" <NAME> (<EMAIL>) Class to define the Dataset object. """ from PIL import Image import os import numpy as np import scipy.io import pandas as pd class Dataset: def __init__(self, train_df, test_df, val_df, database_root, number_of_slices, store_memory=True): """Initialize the Dataset object Args: train_df (dataframe): Training dataframe from TrainTestSplit.split test_df (dataframe): Testing dataframe from TrainTestSplit.splt val_df (dataframe): Validation dataframe from TrainTestSplit.split database_root (str): db root from config number_of_slices (int): Number of slices per group store_memory (bool, optional): Memory management argument. Defaults to True. """ # for idx, row in train_df.iterrows(): # print(type(row)) # print(row) # #print("Images volumes, {}".format(row.iloc[i*3])) # # #scipy 1.2.3 self.images_train = [] self.images_train_path = [] self.labels_train = [] self.labels_train_path = [] self.labels_liver_train = [] self.labels_liver_train_path = [] if train_df is not None: train_df = pd.read_csv(train_df, delim_whitespace = True) if isinstance(train_df, str) else train_df for idx, row in train_df.iterrows(): if (len(row) > 3): if store_memory: aux_images_train = [] aux_labels_train = [] aux_labels_liver_train = [] for i in range(number_of_slices): mat_file = os.path.join(database_root, str(row.iloc[i * 3])) aux_images_train.append(np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32)) self.images_train.append(np.array(aux_images_train)) for i in range(number_of_slices): mat_file = os.path.join(database_root, str(row.iloc[i * 3 + 1])) aux_labels_train.append(np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32)) self.labels_train.append(np.array(aux_labels_train)) for i in range(number_of_slices): mat_file = os.path.join(database_root, str(row.iloc[i * 3 + 2])) aux_labels_liver_train.append(np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32)) self.labels_liver_train.append(np.array(aux_labels_liver_train)) if (idx 
+ 1) % 1000 == 0: print('Loaded ' + str(idx) + ' train images') aux_images_train_path = [] aux_labels_train_path = [] aux_labels_liver_train_path = [] for i in range(number_of_slices): aux_images_train_path.append(os.path.join(database_root, str(row.iloc[i * 3]))) self.images_train_path.append(np.array(aux_images_train_path)) for i in range(number_of_slices): aux_labels_train_path.append(os.path.join(database_root, str(row.iloc[i * 3 + 1]))) self.labels_train_path.append(np.array(aux_labels_train_path)) for i in range(number_of_slices): aux_labels_liver_train_path.append(os.path.join(database_root, str(row.iloc[i * 3 + 2]))) self.labels_liver_train_path.append(np.array(aux_labels_liver_train_path)) self.images_train_path = np.array(self.images_train_path) self.labels_train_path = np.array(self.labels_train_path) self.labels_liver_train_path = np.array(self.labels_liver_train_path) # Load testing images (path) and labels self.images_test = [] self.images_test_path = [] if test_df is not None: test_df = pd.read_csv(test_df, delim_whitespace = True) if isinstance(test_df, str) else test_df for idx, row in test_df.iterrows(): if (len(row) > 1): if store_memory: aux_images_test = [] for i in range(number_of_slices): mat_file = os.path.join(database_root, str(row.iloc[i * 3])) # os.path.join(database_root, str(line.split()[i * 3])) aux_images_test.append( np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32)) self.images_test.append(np.array(aux_images_test)) if (idx + 1) % 1000 == 0: print('Loaded ' + str(idx) + ' test images') aux_images_test_path = [] for i in range(number_of_slices): mat_file = os.path.join(database_root, str(row.iloc[i * 3])) aux_images_test_path.append(mat_file) self.images_test_path.append(
np.array(aux_images_test_path)
numpy.array
# -*- coding: utf-8 -*- """ These the test the public routines exposed in types/common.py related to inference and not otherwise tested in types/test_common.py """ from warnings import catch_warnings, simplefilter import collections import re from datetime import datetime, date, timedelta, time from decimal import Decimal from numbers import Number from fractions import Fraction import numpy as np import pytz import pytest import pandas as pd from pandas._libs import lib, iNaT, missing as libmissing from pandas import (Series, Index, DataFrame, Timedelta, DatetimeIndex, TimedeltaIndex, Timestamp, Panel, Period, Categorical, isna, Interval, DateOffset) from pandas import compat from pandas.compat import u, PY2, StringIO, lrange from pandas.core.dtypes import inference from pandas.core.dtypes.common import ( is_timedelta64_dtype, is_timedelta64_ns_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_number, is_integer, is_float, is_bool, is_scalar, is_scipy_sparse, ensure_int32, ensure_categorical) from pandas.util import testing as tm import pandas.util._test_decorators as td @pytest.fixture(params=[True, False], ids=str) def coerce(request): return request.param # collect all objects to be tested for list-like-ness; use tuples of objects, # whether they are list-like or not (special casing for sets), and their ID ll_params = [ ([1], True, 'list'), # noqa: E241 ([], True, 'list-empty'), # noqa: E241 ((1, ), True, 'tuple'), # noqa: E241 (tuple(), True, 'tuple-empty'), # noqa: E241 ({'a': 1}, True, 'dict'), # noqa: E241 (dict(), True, 'dict-empty'), # noqa: E241 ({'a', 1}, 'set', 'set'), # noqa: E241 (set(), 'set', 'set-empty'), # noqa: E241 (frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241 (frozenset(), 'set', 'frozenset-empty'), # noqa: E241 (iter([1, 2]), True, 'iterator'), # noqa: E241 (iter([]), True, 'iterator-empty'), # noqa: E241 ((x for x in [1, 2]), True, 'generator'), # noqa: E241 ((x for x in []), 
True, 'generator-empty'), # noqa: E241 (Series([1]), True, 'Series'), # noqa: E241 (Series([]), True, 'Series-empty'), # noqa: E241 (Series(['a']).str, True, 'StringMethods'), # noqa: E241 (Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241 (Index([1]), True, 'Index'), # noqa: E241 (Index([]), True, 'Index-empty'), # noqa: E241 (DataFrame([[1]]), True, 'DataFrame'), # noqa: E241 (DataFrame(), True, 'DataFrame-empty'), # noqa: E241 (np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241 (np.array([]), True, 'ndarray-1d-empty'), # noqa: E241 (np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241 (np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241 (np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241 (np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241 (np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241 (np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241 (np.array(2), False, 'ndarray-0d'), # noqa: E241 (1, False, 'int'), # noqa: E241 (b'123', False, 'bytes'), # noqa: E241 (b'', False, 'bytes-empty'), # noqa: E241 ('123', False, 'string'), # noqa: E241 ('', False, 'string-empty'), # noqa: E241 (str, False, 'string-type'), # noqa: E241 (object(), False, 'object'), # noqa: E241 (np.nan, False, 'NaN'), # noqa: E241 (None, False, 'None') # noqa: E241 ] objs, expected, ids = zip(*ll_params) @pytest.fixture(params=zip(objs, expected), ids=ids) def maybe_list_like(request): return request.param def test_is_list_like(maybe_list_like): obj, expected = maybe_list_like expected = True if expected == 'set' else expected assert inference.is_list_like(obj) == expected def test_is_list_like_disallow_sets(maybe_list_like): obj, expected = maybe_list_like expected = False if expected == 'set' else expected assert inference.is_list_like(obj, allow_sets=False) == expected def test_is_sequence(): is_seq = inference.is_sequence assert (is_seq((1, 2))) assert (is_seq([1, 2])) assert (not is_seq("abcd")) assert (not is_seq(u("abcd"))) assert 
(not is_seq(np.int64)) class A(object): def __getitem__(self): return 1 assert (not is_seq(A())) def test_is_array_like(): assert inference.is_array_like(Series([])) assert inference.is_array_like(Series([1, 2])) assert inference.is_array_like(np.array(["a", "b"])) assert inference.is_array_like(Index(["2016-01-01"])) class DtypeList(list): dtype = "special" assert inference.is_array_like(DtypeList()) assert not inference.is_array_like([1, 2, 3]) assert not inference.is_array_like(tuple()) assert not inference.is_array_like("foo") assert not inference.is_array_like(123) @pytest.mark.parametrize('inner', [ [], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]), Series([]), Series(['a']).str, (x for x in range(5)) ]) @pytest.mark.parametrize('outer', [ list, Series, np.array, tuple ]) def test_is_nested_list_like_passes(inner, outer): result = outer([inner for _ in range(5)]) assert inference.is_list_like(result) @pytest.mark.parametrize('obj', [ 'abc', [], [1], (1,), ['a'], 'a', {'a'}, [1, 2, 3], Series([1]), DataFrame({"A": [1]}), ([1, 2] for _ in range(5)), ]) def test_is_nested_list_like_fails(obj): assert not inference.is_nested_list_like(obj) @pytest.mark.parametrize( "ll", [{}, {'A': 1}, Series([1])]) def test_is_dict_like_passes(ll): assert inference.is_dict_like(ll) @pytest.mark.parametrize( "ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])]) def test_is_dict_like_fails(ll): assert not inference.is_dict_like(ll) @pytest.mark.parametrize("has_keys", [True, False]) @pytest.mark.parametrize("has_getitem", [True, False]) @pytest.mark.parametrize("has_contains", [True, False]) def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains): class DictLike(object): def __init__(self, d): self.d = d if has_keys: def keys(self): return self.d.keys() if has_getitem: def __getitem__(self, key): return self.d.__getitem__(key) if has_contains: def __contains__(self, key): return self.d.__contains__(key) d = DictLike({1: 2}) result = inference.is_dict_like(d) 
expected = has_keys and has_getitem and has_contains assert result is expected def test_is_file_like(mock): class MockFile(object): pass is_file = inference.is_file_like data = StringIO("data") assert is_file(data) # No read / write attributes # No iterator attributes m = MockFile() assert not is_file(m) MockFile.write = lambda self: 0 # Write attribute but not an iterator m = MockFile() assert not is_file(m) # gh-16530: Valid iterator just means we have the # __iter__ attribute for our purposes. MockFile.__iter__ = lambda self: self # Valid write-only file m = MockFile() assert is_file(m) del MockFile.write MockFile.read = lambda self: 0 # Valid read-only file m = MockFile() assert is_file(m) # Iterator but no read / write attributes data = [1, 2, 3] assert not is_file(data) assert not is_file(mock.Mock()) @pytest.mark.parametrize( "ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)]) def test_is_names_tuple_passes(ll): assert inference.is_named_tuple(ll) @pytest.mark.parametrize( "ll", [(1, 2, 3), 'a', Series({'pi': 3.14})]) def test_is_names_tuple_fails(ll): assert not inference.is_named_tuple(ll) def test_is_hashable(): # all new-style classes are hashable by default class HashableClass(object): pass class UnhashableClass1(object): __hash__ = None class UnhashableClass2(object): def __hash__(self): raise TypeError("Not hashable") hashable = (1, 3.14, np.float64(3.14), 'a', tuple(), (1, ), HashableClass(), ) not_hashable = ([], UnhashableClass1(), ) abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), ) for i in hashable: assert inference.is_hashable(i) for i in not_hashable: assert not inference.is_hashable(i) for i in abc_hashable_not_really_hashable: assert not inference.is_hashable(i) # numpy.array is no longer collections.Hashable as of # https://github.com/numpy/numpy/pull/5326, just test # is_hashable() assert not inference.is_hashable(np.array([])) # old-style classes in Python 2 don't appear hashable to # collections.Hashable but also 
seem to support hash() by default if PY2: class OldStyleClass(): pass c = OldStyleClass() assert not isinstance(c, compat.Hashable) assert inference.is_hashable(c) hash(c) # this will not raise @pytest.mark.parametrize( "ll", [re.compile('ad')]) def test_is_re_passes(ll): assert inference.is_re(ll) @pytest.mark.parametrize( "ll", ['x', 2, 3, object()]) def test_is_re_fails(ll): assert not inference.is_re(ll) @pytest.mark.parametrize( "ll", [r'a', u('x'), r'asdf', re.compile('adsf'), u(r'\u2233\s*'), re.compile(r'')]) def test_is_recompilable_passes(ll): assert inference.is_re_compilable(ll) @pytest.mark.parametrize( "ll", [1, [], object()]) def test_is_recompilable_fails(ll): assert not inference.is_re_compilable(ll) class TestInference(object): def test_infer_dtype_bytes(self): compare = 'string' if PY2 else 'bytes' # string array of bytes arr = np.array(list('abc'), dtype='S1') assert lib.infer_dtype(arr) == compare # object array of bytes arr = arr.astype(object) assert lib.infer_dtype(arr) == compare # object array of bytes with missing values assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare def test_isinf_scalar(self): # GH 11352 assert libmissing.isposinf_scalar(float('inf')) assert libmissing.isposinf_scalar(np.inf) assert not libmissing.isposinf_scalar(-np.inf) assert not libmissing.isposinf_scalar(1) assert not libmissing.isposinf_scalar('a') assert libmissing.isneginf_scalar(float('-inf')) assert libmissing.isneginf_scalar(-np.inf) assert not libmissing.isneginf_scalar(np.inf) assert not libmissing.isneginf_scalar(1) assert not libmissing.isneginf_scalar('a') def test_maybe_convert_numeric_infinities(self): # see gh-13274 infinities = ['inf', 'inF', 'iNf', 'Inf', 'iNF', 'InF', 'INf', 'INF'] na_values = {'', 'NULL', 'nan'} pos = np.array(['inf'], dtype=np.float64) neg = np.array(['-inf'], dtype=np.float64) msg = "Unable to parse string" for infinity in infinities: for maybe_int in (True, False): out = lib.maybe_convert_numeric( 
np.array([infinity], dtype=object), na_values, maybe_int) tm.assert_numpy_array_equal(out, pos) out = lib.maybe_convert_numeric( np.array(['-' + infinity], dtype=object), na_values, maybe_int) tm.assert_numpy_array_equal(out, neg) out = lib.maybe_convert_numeric( np.array([u(infinity)], dtype=object), na_values, maybe_int) tm.assert_numpy_array_equal(out, pos) out = lib.maybe_convert_numeric( np.array(['+' + infinity], dtype=object), na_values, maybe_int) tm.assert_numpy_array_equal(out, pos) # too many characters with pytest.raises(ValueError, match=msg): lib.maybe_convert_numeric( np.array(['foo_' + infinity], dtype=object), na_values, maybe_int) def test_maybe_convert_numeric_post_floatify_nan(self, coerce): # see gh-13314 data = np.array(['1.200', '-999.000', '4.500'], dtype=object) expected = np.array([1.2, np.nan, 4.5], dtype=np.float64) nan_values = {-999, -999.0} out = lib.maybe_convert_numeric(data, nan_values, coerce) tm.assert_numpy_array_equal(out, expected) def test_convert_infs(self): arr = np.array(['inf', 'inf', 'inf'], dtype='O') result = lib.maybe_convert_numeric(arr, set(), False) assert result.dtype == np.float64 arr = np.array(['-inf', '-inf', '-inf'], dtype='O') result = lib.maybe_convert_numeric(arr, set(), False) assert result.dtype == np.float64 def test_scientific_no_exponent(self): # See PR 12215 arr = np.array(['42E', '2E', '99e', '6e'], dtype='O') result = lib.maybe_convert_numeric(arr, set(), False, True) assert np.all(np.isnan(result)) def test_convert_non_hashable(self): # GH13324 # make sure that we are handing non-hashables arr = np.array([[10.0, 2], 1.0, 'apple']) result = lib.maybe_convert_numeric(arr, set(), False, True) tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan])) def test_convert_numeric_uint64(self): arr = np.array([2**63], dtype=object) exp = np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) arr = np.array([str(2**63)], dtype=object) exp = 
np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) arr = np.array([np.uint64(2**63)], dtype=object) exp = np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) @pytest.mark.parametrize("arr", [ np.array([2**63, np.nan], dtype=object), np.array([str(2**63), np.nan], dtype=object), np.array([np.nan, 2**63], dtype=object), np.array([np.nan, str(2**63)], dtype=object)]) def test_convert_numeric_uint64_nan(self, coerce, arr): expected = arr.astype(float) if coerce else arr.copy() result = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce) tm.assert_almost_equal(result, expected) def test_convert_numeric_uint64_nan_values(self, coerce): arr = np.array([2**63, 2**63 + 1], dtype=object) na_values = {2**63} expected = (np.array([np.nan, 2**63 + 1], dtype=float) if coerce else arr.copy()) result = lib.maybe_convert_numeric(arr, na_values, coerce_numeric=coerce) tm.assert_almost_equal(result, expected) @pytest.mark.parametrize("case", [ np.array([2**63, -1], dtype=object), np.array([str(2**63), -1], dtype=object), np.array([str(2**63), str(-1)], dtype=object), np.array([-1, 2**63], dtype=object), np.array([-1, str(2**63)], dtype=object), np.array([str(-1), str(2**63)], dtype=object)]) def test_convert_numeric_int64_uint64(self, case, coerce): expected = case.astype(float) if coerce else case.copy() result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce) tm.assert_almost_equal(result, expected) @pytest.mark.parametrize("value", [-2**63 - 1, 2**64]) def test_convert_int_overflow(self, value): # see gh-18584 arr = np.array([value], dtype=object) result = lib.maybe_convert_objects(arr) tm.assert_numpy_array_equal(arr, result) def test_maybe_convert_objects_uint64(self): # see gh-4471 arr = np.array([2**63], dtype=object) exp = np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) # NumPy bug: can't 
compare uint64 to int64, as that # results in both casting to float64, so we should # make sure that this function is robust against it arr = np.array([np.uint64(2**63)], dtype=object) exp = np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) arr = np.array([2, -1], dtype=object) exp = np.array([2, -1], dtype=np.int64) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) arr = np.array([2**63, -1], dtype=object) exp = np.array([2**63, -1], dtype=object) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) def test_mixed_dtypes_remain_object_array(self): # GH14956 array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object) result = lib.maybe_convert_objects(array, convert_datetime=1) tm.assert_numpy_array_equal(result, array) class TestTypeInference(object): # Dummy class used for testing with Python objects class Dummy(): pass def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype): # see pandas/conftest.py inferred_dtype, values = any_skipna_inferred_dtype # make sure the inferred dtype of the fixture is as requested assert inferred_dtype == lib.infer_dtype(values, skipna=True) def test_length_zero(self): result = lib.infer_dtype(np.array([], dtype='i4')) assert result == 'integer' result = lib.infer_dtype([]) assert result == 'empty' # GH 18004 arr = np.array([np.array([], dtype=object), np.array([], dtype=object)]) result = lib.infer_dtype(arr) assert result == 'empty' def test_integers(self): arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O') result = lib.infer_dtype(arr) assert result == 'integer' arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O') result = lib.infer_dtype(arr) assert result == 'mixed-integer' arr = np.array([1, 2, 3, 4, 5], dtype='i4') result = lib.infer_dtype(arr) assert result == 'integer' def test_bools(self): arr = np.array([True, False, True, True, True], dtype='O') result = lib.infer_dtype(arr) assert result == 
'boolean' arr = np.array([np.bool_(True), np.bool_(False)], dtype='O') result = lib.infer_dtype(arr) assert result == 'boolean' arr = np.array([True, False, True, 'foo'], dtype='O') result = lib.infer_dtype(arr) assert result == 'mixed' arr = np.array([True, False, True], dtype=bool) result = lib.infer_dtype(arr) assert result == 'boolean' arr = np.array([True, np.nan, False], dtype='O') result = lib.infer_dtype(arr, skipna=True) assert result == 'boolean' def test_floats(self): arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O') result = lib.infer_dtype(arr) assert result == 'floating' arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'], dtype='O') result = lib.infer_dtype(arr) assert result == 'mixed-integer' arr = np.array([1, 2, 3, 4, 5], dtype='f4') result = lib.infer_dtype(arr) assert result == 'floating' arr = np.array([1, 2, 3, 4, 5], dtype='f8') result = lib.infer_dtype(arr) assert result == 'floating' def test_decimals(self): # GH15690 arr = np.array([Decimal(1), Decimal(2), Decimal(3)]) result = lib.infer_dtype(arr) assert result == 'decimal' arr = np.array([1.0, 2.0, Decimal(3)]) result = lib.infer_dtype(arr) assert result == 'mixed' arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)]) result = lib.infer_dtype(arr) assert result == 'decimal' arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O') result = lib.infer_dtype(arr) assert result == 'decimal' def test_string(self): pass def test_unicode(self): arr = [u'a', np.nan, u'c'] result = lib.infer_dtype(arr) assert result == 'mixed' arr = [u'a', np.nan, u'c'] result = lib.infer_dtype(arr, skipna=True) expected = 'unicode' if PY2 else 'string' assert result == expected @pytest.mark.parametrize('dtype, missing, skipna, expected', [ (float, np.nan, False, 'floating'), (float, np.nan, True, 'floating'), (object, np.nan, False, 'floating'), (object, np.nan, True, 'empty'), (object, None, False, 'mixed'), (object, None, True, 'empty') ]) @pytest.mark.parametrize('box', 
[pd.Series, np.array]) def test_object_empty(self, box, missing, dtype, skipna, expected): # GH 23421 arr = box([missing, missing], dtype=dtype) result = lib.infer_dtype(arr, skipna=skipna) assert result == expected def test_datetime(self): dates = [datetime(2012, 1, x) for x in range(1, 20)] index = Index(dates) assert index.inferred_type == 'datetime64' def test_infer_dtype_datetime(self): arr = np.array([Timestamp('2011-01-01'), Timestamp('2011-01-02')]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([np.datetime64('2011-01-01'), np.datetime64('2011-01-01')], dtype=object) assert lib.infer_dtype(arr) == 'datetime64' arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]) assert lib.infer_dtype(arr) == 'datetime' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, pd.Timestamp('2011-01-02')]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([n, np.datetime64('2011-01-02')]) assert lib.infer_dtype(arr) == 'datetime64' arr = np.array([n, datetime(2011, 1, 1)]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([n, pd.Timestamp('2011-01-02'), n]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([n, np.datetime64('2011-01-02'), n]) assert lib.infer_dtype(arr) == 'datetime64' arr = np.array([n, datetime(2011, 1, 1), n]) assert lib.infer_dtype(arr) == 'datetime' # different type of nat arr = np.array([np.timedelta64('nat'), np.datetime64('2011-01-02')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([np.datetime64('2011-01-02'), np.timedelta64('nat')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' # mixed datetime arr = np.array([datetime(2011, 1, 1), pd.Timestamp('2011-01-02')]) assert lib.infer_dtype(arr) == 'datetime' # should be datetime? 
arr = np.array([np.datetime64('2011-01-01'), pd.Timestamp('2011-01-02')]) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([pd.Timestamp('2011-01-02'), np.datetime64('2011-01-01')]) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1]) assert lib.infer_dtype(arr) == 'mixed-integer' arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1]) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')]) assert lib.infer_dtype(arr) == 'mixed' def test_infer_dtype_timedelta(self): arr = np.array([pd.Timedelta('1 days'), pd.Timedelta('2 days')]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D')], dtype=object) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([timedelta(1), timedelta(2)]) assert lib.infer_dtype(arr) == 'timedelta' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, Timedelta('1 days')]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([n, np.timedelta64(1, 'D')]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([n, timedelta(1)]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([n, pd.Timedelta('1 days'), n]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([n, np.timedelta64(1, 'D'), n]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([n, timedelta(1), n]) assert lib.infer_dtype(arr) == 'timedelta' # different type of nat arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' def test_infer_dtype_period(self): # GH 13664 arr = np.array([pd.Period('2011-01', freq='D'), pd.Period('2011-02', freq='D')]) assert lib.infer_dtype(arr) == 'period' arr = np.array([pd.Period('2011-01', freq='D'), pd.Period('2011-02', freq='M')]) assert lib.infer_dtype(arr) == 'period' # starts with 
nan for n in [pd.NaT, np.nan]: arr = np.array([n, pd.Period('2011-01', freq='D')]) assert lib.infer_dtype(arr) == 'period' arr = np.array([n, pd.Period('2011-01', freq='D'), n]) assert lib.infer_dtype(arr) == 'period' # different type of nat arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' @pytest.mark.parametrize( "data", [ [datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)], [Timestamp("20170612"), Timestamp("20170311")], [Timestamp("20170612", tz='US/Eastern'), Timestamp("20170311", tz='US/Eastern')], [date(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')], [np.datetime64("2017-06-12"), np.datetime64("2017-03-11")], [np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)] ] ) def test_infer_datetimelike_array_datetime(self, data): assert lib.infer_datetimelike_array(data) == "datetime" @pytest.mark.parametrize( "data", [ [timedelta(2017, 6, 12), timedelta(2017, 3, 11)], [timedelta(2017, 6, 12), date(2017, 3, 11)], [np.timedelta64(2017, "D"), np.timedelta64(6, "s")], [np.timedelta64(2017, "D"), timedelta(2017, 3, 11)] ] ) def test_infer_datetimelike_array_timedelta(self, data): assert lib.infer_datetimelike_array(data) == "timedelta" def test_infer_datetimelike_array_date(self): arr = [date(2017, 6, 12), date(2017, 3, 11)] assert lib.infer_datetimelike_array(arr) == "date" @pytest.mark.parametrize( "data", [ ["2017-06-12", "2017-03-11"], [20170612, 20170311], [20170612.5, 20170311.8], [Dummy(), Dummy()], [Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')], [Timestamp("20170612"), 20170311], [timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')] ] ) def test_infer_datetimelike_array_mixed(self, data): assert lib.infer_datetimelike_array(data) == "mixed" @pytest.mark.parametrize( "first, expected", [ [[None], "mixed"], [[np.nan], 
"mixed"], [[pd.NaT], "nat"], [[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"], [[np.datetime64("2017-06-12"), pd.NaT], "datetime"], [[date(2017, 6, 12), pd.NaT], "date"], [[timedelta(2017, 6, 12), pd.NaT], "timedelta"], [[np.timedelta64(2017, "D"), pd.NaT], "timedelta"] ] ) @pytest.mark.parametrize("second", [None, np.nan]) def test_infer_datetimelike_array_nan_nat_like(self, first, second, expected): first.append(second) assert lib.infer_datetimelike_array(first) == expected def test_infer_dtype_all_nan_nat_like(self): arr = np.array([np.nan, np.nan]) assert lib.infer_dtype(arr) == 'floating' # nan and None mix are result in mixed arr = np.array([np.nan, np.nan, None]) assert lib.infer_dtype(arr) == 'mixed' arr = np.array([None, np.nan, np.nan]) assert lib.infer_dtype(arr) == 'mixed' # pd.NaT arr = np.array([pd.NaT]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([pd.NaT, np.nan]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([np.nan, pd.NaT]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([np.nan, pd.NaT, np.nan]) assert lib.infer_dtype(arr) == 'datetime' arr = np.array([None, pd.NaT, None]) assert lib.infer_dtype(arr) == 'datetime' # np.datetime64(nat) arr = np.array([np.datetime64('nat')]) assert lib.infer_dtype(arr) == 'datetime64' for n in [np.nan, pd.NaT, None]: arr = np.array([n, np.datetime64('nat'), n]) assert lib.infer_dtype(arr) == 'datetime64' arr = np.array([pd.NaT, n, np.datetime64('nat'), n]) assert lib.infer_dtype(arr) == 'datetime64' arr = np.array([np.timedelta64('nat')], dtype=object) assert lib.infer_dtype(arr) == 'timedelta' for n in [np.nan, pd.NaT, None]: arr = np.array([n, np.timedelta64('nat'), n]) assert lib.infer_dtype(arr) == 'timedelta' arr = np.array([pd.NaT, n, np.timedelta64('nat'), n]) assert lib.infer_dtype(arr) == 'timedelta' # datetime / timedelta mixed arr = np.array([pd.NaT, np.datetime64('nat'), np.timedelta64('nat'), np.nan]) assert lib.infer_dtype(arr) == 'mixed' arr = 
np.array([np.timedelta64('nat'), np.datetime64('nat')], dtype=object) assert lib.infer_dtype(arr) == 'mixed' def test_is_datetimelike_array_all_nan_nat_like(self): arr = np.array([np.nan, pd.NaT, np.datetime64('nat')]) assert lib.is_datetime_array(arr) assert lib.is_datetime64_array(arr) assert not lib.is_timedelta_or_timedelta64_array(arr) arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')]) assert not lib.is_datetime_array(arr) assert not lib.is_datetime64_array(arr) assert lib.is_timedelta_or_timedelta64_array(arr) arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')
numpy.timedelta64
import errno  # bug fix: mkdir_p referenced errno without importing it
import os
import re

import numpy as np
import scipy.misc
# NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this import
# pins the code to an old SciPy. Consider PIL.Image.resize as a replacement.
from scipy.misc import imresize
from PIL import Image, ImageDraw, ImageFont


def mkdir_p(path):
    """Create `path` like `mkdir -p`: succeed silently if it already exists.

    Bug fixes vs. original: `errno` was never imported (so any OSError became
    a NameError), and OSErrors other than EEXIST were silently swallowed —
    they are now re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def drawCaption(img, caption, max_len):
    """Annotate a super-image with stage labels and the (wrapped) caption.

    Args:
        img: HxWx3 uint8-like array holding the composed image grid.
        caption: the full caption string; split into two lines at `max_len`
            words.
        max_len: number of words placed on the first caption line.

    Returns:
        A PIL Image with the text drawn on it.
    """
    img_txt = Image.fromarray(img)
    # get a font
    # NOTE(review): hard-coded font path; fails off this machine — confirm.
    fnt = ImageFont.truetype('/eai/project/.fonts/FreeMono.ttf', 30)
    # get a drawing context
    d = ImageDraw.Draw(img_txt)
    d.text((10, 256), 'Stage-I', font=fnt, fill=(0, 0, 0, 255))
    d.text((10, 512), 'Stage-II', font=fnt, fill=(0, 0, 0, 255))
    d.text((10, 768), 'Stage-III', font=fnt, fill=(0, 0, 0, 255))

    caption = caption.split(' ')
    cap1 = ' '.join(caption[:max_len])
    # bug fix: was caption[max_len + 1:], which silently dropped the word at
    # index max_len; [max_len:] makes the two lines a contiguous split.
    cap2 = ' '.join(caption[max_len:])
    d.text((256, 10), cap1, font=fnt, fill=(0, 0, 0, 255))
    d.text((256, 60), cap2, font=fnt, fill=(127, 127, 127, 255))
    return img_txt


def save_images_with_text(
        lr_sample_batchs, hr_sample_batchs, sr_sample_batchs,
        reals_batch, texts_batch, batch_size, max_len, startID,
        save_dir=None):
    """Compose LR/HR/SR sample rows (plus the real images) into one grid per
    caption. NOTE(review): definition continues beyond this chunk; only the
    visible portion is documented here.
    """
    if save_dir and not os.path.isdir(save_dir):
        print('Make a new folder: ', save_dir)
        mkdir_p(save_dir)

    # Save up to 16 samples for each text embedding/sentence
    img_shape = sr_sample_batchs[0][0].shape
    super_images = []
    for i in range(batch_size):
        # skip entries whose caption contains no alphabetic text
        if not re.search('[a-zA-Z]+', texts_batch[i]):
            continue

        padding = 255 + np.zeros(img_shape)  # white spacer cell
        row1 = [padding]
        row2 = [padding]
        row3 = [padding]
        # presumably lr_sample_batchs[0].shape[0] == samples per caption;
        # indexing below uses [i][j] — TODO confirm batch layout.
        for j in range(lr_sample_batchs[0].shape[0]):
            lr_img = lr_sample_batchs[i][j]
            hr_img = hr_sample_batchs[i][j]
            sr_img = sr_sample_batchs[i][j]

            if j == 0:
                # first column: the real images, upscaled to SR resolution
                row1.append(imresize(reals_batch[0][i], sr_img.shape[:2]))
                row2.append(imresize(reals_batch[1][i], sr_img.shape[:2]))
                row3.append(imresize(reals_batch[2][i], sr_img.shape[:2]))

            lr_re_sample = imresize(lr_img, sr_img.shape[:2])
            hr_re_sample = imresize(hr_img, sr_img.shape[:2])
            row1.append(lr_re_sample)
            row2.append(hr_re_sample)
            row3.append(sr_img)
        row1 = np.concatenate(row1, axis=1)
        row2 = np.concatenate(row2, axis=1)
        row3 = np.concatenate(row3, axis=1)
        superimage = np.concatenate([row1, row2, row3], axis=0)
        top_padding = 255 + np.zeros((128, superimage.shape[1], 3))
        superimage = np.concatenate([top_padding, superimage], axis=0)
numpy.concatenate
import os import time import random import threading import numpy as np from keras import backend as K from keras.preprocessing.image import img_to_array, load_img from keras.preprocessing.image import ImageDataGenerator from keras.applications import vgg16 from pycocotools.coco import COCO EPS = np.finfo(float).eps split_name_dict = {'valid': 'val', 'train': 'train', 'test': 'test'} data_source_dir = "/media/Borg_LS/DATA" class CocoGenerator(object): def __init__(self, image_data_generator=ImageDataGenerator(), subset_name='2017', split_name='train', source_dir=data_source_dir, store_labels=False, batch_size=1, group_method='none', # one of 'none', 'random', 'ratio' shuffle=True, seed=None, standardize_method='zmuv', llb=None, lub=None, ): """Initialization""" self.set_name = split_name_dict[split_name] + subset_name self.image_data_generator = image_data_generator self.source_dir = os.path.join(source_dir, 'coco') self._coco = COCO(os.path.join(self.source_dir, 'annotations', 'instances_' + self.set_name + '.json')) self.image_ids = self._coco.getImgIds() if llb is not None or lub is not None: self.remove_outliers = True else: self.remove_outliers = False self.label_lower_bound = llb self.label_upper_bound = lub self._num_samples = None self._num_classes = None self._steps = None self._good_indices = None self._images = None self._labels = None self._label_names = None self.class_ids = None self.class_id_to_name = {} self.class_id_to_index = {} self.names = None self.name_to_class_id = {} self.name_to_index = {} self.load_metadata() self.batch_size = int(batch_size) self.group_method = group_method self.shuffle_groups = shuffle # self.store_labels = store_labels self.stored_labels =
np.zeros((self.num_samples, self.num_classes))
numpy.zeros
# -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import import numpy as np from numba import autojit, jit, double, void, uint32, npy_intp, typeof def uint_int_div_ary(elts, normdist, seed): for i in xrange(elts.shape[0]): # Problem with using sext instead of zext for uint32 elt = (seed[i] // uint32(normdist.shape[0])) elts[i] = elt def test_uint_int_div_ary(): NPATHS = 10 normdist = np.empty(1000) #np.random.normal(0., 1., 1000) seed =
np.arange(0x80000000, 0x80000000 + NPATHS, dtype=np.uint32)
numpy.arange
"""Tests the functions contained within utilities.py""" import numpy as np import pytest import quantum_heom.utilities as util # ------------------------------------------------------------------- # MATH-BASED FUNCTIONS # ------------------------------------------------------------------- @pytest.mark.parametrize( 'mat, ans', [(np.array([[0.5, 0.5], [0.5, 0.5]]), 1.0), (np.array([[2**(-1/2), 0], [0, 2**(-1/2)]]), 1.0)]) def test_trace_matrix_squared_pure(mat, ans): """ Tests that the correct value of 1 is returned for the trace of matrix squared for matrices that mimic a pure density matrix (i.e. tr(rho^2) = 1). """ assert np.isclose(util.trace_matrix_squared(mat), ans) @pytest.mark.parametrize( 'mat, ans', [(np.array([[0.5, 0], [0, 0.5]]), 0.5)]) def test_trace_matrix_squared_not_pure(mat, ans): """ Tests that the correct value of 1 is returned for the trace of matrix squared for matrices that mimic an impure density matrix (i.e. tr(rho^2) < 1). """ assert np.isclose(util.trace_matrix_squared(mat), ans) @pytest.mark.parametrize( 'mat_a, mat_b, ans', [(np.array([[0, 0], [0, 0]]), np.array([[0, 0], [0, 0]]), np.array([[0, 0], [0, 0]])), (np.array([[1, 0.3], [0.3, 1]]), np.array([[1, 0.3], [0.3, 1]]), np.array([[0, 0], [0, 0]]))]) def test_commutator_zero(mat_a, mat_b, ans): """ Tests that the correct commutator of A and B is returned. """ assert np.all(util.commutator(mat_a, mat_b) == ans) @pytest.mark.parametrize( 'mat_a, mat_b, ans', [(np.array([[0, 0], [0, 0]]), np.array([[0, 0], [0, 0]]), np.array([[0, 0], [0, 0]])), (np.array([[1, 0], [0, 1]]), np.array([[1, 0], [0, 1]]), np.array([[2, 0], [0, 2]]))]) def test_anti_commutator(mat_a, mat_b, ans): """ Tests that the correct anti-commutator of A and B is returned. 
""" assert np.all(util.commutator(mat_a, mat_b, anti=True) == ans) @pytest.mark.parametrize( 'scaling, dims', [(3, 2), (7, 4), (1, 6), (0, 4)]) def test_basis_change_identity(scaling, dims): """ Tests that the function maintains the expected behaviour that the identity matrix (and multiples of) is invariant under basis transformation. """ liouville = [True, False] for liou in liouville: if liou: assert np.all(util.basis_change(scaling * np.eye(dims**2), np.eye(dims), liou) == scaling * np.eye(dims**2)) else: assert np.all(util.basis_change(scaling * np.eye(dims), np.eye(dims), liou) == scaling * np.eye(dims)) # @pytest.mark.parametrize( # 'matrix, states, output', # [(np.array([[1, 0], [0, 0]]), np.array)]) def test_basis_change(): """ Tests that the correct matrix is returned when performing a basis change. """ @pytest.mark.parametrize( 'matrix', [np.array([[1, 2], [3, 4]]), np.array([[100, -300], [2, 44]]), np.eye(6)]) def test_renormalise_matrix(matrix): """ Asserts that the function correctly renormalises an input matrix to have trace 1. """ assert np.isclose(np.trace(util.renormalise_matrix(matrix)), 1.) @pytest.mark.parametrize( 'matrix', [np.array([[1, 2], [3, -1]]),
np.array([[100, -300], [2, -100]])
numpy.array