# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 13:37:29 2018
Class for implementing the scores for the composition UI and also the display image
with all the scores@author: <NAME>
"""
import cv2
import numpy as np
import itertools
from scipy.spatial import distance as dist
from skimage.measure import compare_ssim as ssim
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
import pandas as pd
from SaliencyMap import Saliency
class AutoScoreML ():
def __init__(self, extractedFeatures ):
self.df = pd.DataFrame(np.array(extractedFeatures))
def autoScoreML(self):
filepath_01 = 'D:\\google drive\\A PhD Project at Godlsmiths\\ArtistSupervisionMaya\\csv_file\\scoringApril30.csv'
filepath_02 = 'D:\\google drive\\A PhD Project at Godlsmiths\\ArtistSupervisionMaya\\csv_file\\scoringApril30_B.csv'
# =============================================================================
# filepath_03 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring20apr2018_c.csv'
# filepath_04 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring20apr2018_d.csv'
# filepath_05 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring20apr2018_e.csv'
# filepath_06 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring21apr2018_a.csv'
# filepath_07 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring21apr2018_b.csv'
# filepath_08 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring21apr2018_c.csv'
# filepath_09 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring22apr2018_a.csv'
#
# =============================================================================
df_01 = pd.read_csv(filepath_01)
df_02 = pd.read_csv(filepath_02)
# =============================================================================
# df_03 = pd.read_csv(filepath_03)
# df_04 = pd.read_csv(filepath_04)
# df_05 = pd.read_csv(filepath_05)
# df_06 = pd.read_csv(filepath_06)
# df_07 = pd.read_csv(filepath_07)
# df_08 = pd.read_csv(filepath_08)
# df_09 = pd.read_csv(filepath_09)
# =============================================================================
frames= [df_01, df_02
#,df_03, df_04, df_05, df_06, df_07, df_08, df_09
]
df = pd.concat(frames)
df.reset_index(drop = True, inplace = True)
# drop the Null Value
df.dropna(inplace=True)
# select the features to use:
df.drop(['file', 'CompositionUserChoice'], axis=1, inplace=True)
X_train = df.drop('judge', axis = 1)
#y = df['judge']
X_test = self.df
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# construct the ANN
# import the Keras Library and the required packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import os
# load json and create model
json_file = open("D:\\google drive\\A PhD Project at Godlsmiths\\ArtistSupervisionProject\\code\\classifier.json", 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("D:\\google drive\\A PhD Project at Godlsmiths\\ArtistSupervisionProject\\code\\classifier.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# =============================================================================
# score = loaded_model.evaluate(X_test, y_test, verbose=0)
# print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
# =============================================================================
# predict the test set results
y_pred = loaded_model.predict(X_test)
# X_test holds a single feature row, so take the argmax of the first prediction
res = np.argmax(y_pred[0])
return res
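# Hedged usage sketch (not in the original source). Assumes `features` is a
# single 2-D row of numeric values matching the training CSV columns after
# 'file', 'CompositionUserChoice' and 'judge' are dropped, and that the model
# JSON/weights paths above exist on disk:
#
#     features = [[0.42, 0.71, 0.13]]   # hypothetical row of extracted features
#     scorer = AutoScoreML(features)
#     label = scorer.autoScoreML()      # index of the predicted judge class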
class CompositionAnalysis ():
def __init__ (self, image = None, imagepath = None, mask = None):
if imagepath:
self.image = cv2.imread(imagepath)
self.imagepath = imagepath
else:
self.image = image
self.totalPixels = self.image.size
self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
# =============================================================================
#
# def _borderCut(self, borderCutted):
#
#
# borderCutted[0:2, :] = 0
# borderCutted[-2:self.image.shape[0], :] = 0
# borderCutted[:, 0:2] = 0
# borderCutted[:, -2:self.image.shape[1]] = 0
#
# return borderCutted
# =============================================================================
def synthesisScores (self):
# return the display image for the UI
rows, cols, depth = self.image.shape
scoreSynthesisImg = np.zeros(self.image.shape, dtype="uint8")
# make solid color for the background
scoreSynthesisImg[:] = (218,218,218)
cv2.line(scoreSynthesisImg, ( int(self.image.shape[1] * 0.6), 20), ( int(self.image.shape[1] * 0.6),self.image.shape[0]), (50,50,140), 1)
cv2.line(scoreSynthesisImg, ( int(self.image.shape[1] * 0.75), 20), ( int(self.image.shape[1] * 0.75),self.image.shape[0]), (60,140,90), 1)
# collect the balance scores:
VisualBalanceScore = ( self.scoreVisualBalance + self.scoreHullBalance ) / 2
# corner balance and line
lineandcornerBalance = (self.cornersBalance + self.verticalandHorizBalanceMean ) / 2
# collect the rhythm scores:
#asymmetry = (self.scoreFourier + self.verticalandHorizBalanceMean + self.ssimAsymmetry) / 3
asymmetry = (self.ssimAsymmetry +self.diagonalAsymmetry) / 2
scoreFourier = self.scoreFourier
# collect the gold proportion scores:
goldScore = self.scoreProportionAreaVsGoldenRatio
#score composition
scoreCompMax = max(self.diagonalasymmetryBalance, self.ScoreFourTriangleAdapted,self.ScoreBigTriangle)
ruleOfThird = self.ScoreRuleOfThird
# diagonal balance composition
#diagonalasymmetryBalance = self.diagonalasymmetryBalance
# spiral
spiralScore = self.scoreSpiralGoldenRatio
# fractal
fractalScoreFromTarget = self.fractalScoreFromTarget
cv2.putText(scoreSynthesisImg, "Balance", (20, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[20:24, 10:int(VisualBalanceScore*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Rule of Third", (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[35:39, 10:int(ruleOfThird*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Composition Max", (20, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[50:54, 10:int(scoreCompMax*cols*0.9)] = (120,60,120)
#cv2.putText(scoreSynthesisImg, "Diagonal Comp", (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
#scoreSynthesisImg[65:70, 10:int(diagonalasymmetryBalance*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Spiral ", (20, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[80:84, 10:int(spiralScore*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Asymmetry ", (20, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[95:99, 10:int(asymmetry*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Fourier ", (20, 105), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[110:114, 10:int(scoreFourier*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "CornerLinesBalance ", (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[125:129, 10:int(lineandcornerBalance*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Proportion ", (20, 135), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[140:144, 10:int(goldScore*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Fractal ", (20, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[155:159, 10:int(fractalScoreFromTarget*cols*0.9)] = (120,60,120)
#cv2.putText(scoreSynthesisImg, "Balance, asymmetry, Proportion, corner, spiral ", (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
#cv2.putText(scoreSynthesisImg, "Possible Comp: {} ".format(selectedComp), (10, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
return scoreSynthesisImg
def fourierOnEdgesDisplay (self):
ImgImpRegionA, contours, keypoints = self._orbSegmentation ( maxKeypoints = 10000, edged = False, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL)
cropped_img_lf = ImgImpRegionA[0:int(ImgImpRegionA.shape[0]), 0: int(ImgImpRegionA.shape[1] / 2) ]
cropped_img_rt = ImgImpRegionA[0:int(ImgImpRegionA.shape[0]), int(ImgImpRegionA.shape[1] / 2): ImgImpRegionA.shape[1] ]
#imgDftGray = self._returnDFT(ImgImpRegionA)
imgDftGraylf = self._returnDFT(cropped_img_lf)
imgDftGrayRt = self._returnDFT(cropped_img_rt)
# count the nonzero pixels in the left and right halves
numberOfWhite_lf = (imgDftGraylf>0).sum()
numberOfWhite_Rt = (imgDftGrayRt > 0).sum()
# create the stitched picture
stitchedDft = np.concatenate((imgDftGraylf, imgDftGrayRt), axis = 1)
score = (abs(numberOfWhite_lf - numberOfWhite_Rt)) / (numberOfWhite_lf + numberOfWhite_Rt)
# to penalise the change in rhythm
scoreFourier = np.exp(-score * self.image.shape[0]/2)
#cv2.putText(stitchedDft, "diff: {:.3f}".format(score), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 1)
self.scoreFourier = scoreFourier
return stitchedDft, scoreFourier
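# Worked example of the score above (illustrative numbers, not from the source):
# with image height 200, left-half white count 5200 and right-half 5000,
# score = |5200 - 5000| / (5200 + 5000) ~= 0.0196, and
# scoreFourier = exp(-0.0196 * 200 / 2) = exp(-1.96) ~= 0.14,
# so even a ~2% left/right spectral difference is penalised heavily.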
def _returnDFT (self, imageForDft):
ImgImpRegionA = imageForDft
ImgImpRegionA = cv2.cvtColor(ImgImpRegionA, cv2.COLOR_BGR2GRAY)
#dft = cv2.dft(np.float32(self.gray),flags = cv2.DFT_COMPLEX_OUTPUT)
dft = cv2.dft(np.float32(ImgImpRegionA),flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
cv2.normalize( magnitude_spectrum, magnitude_spectrum, alpha = 0 , beta = 1 , norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
imgDftGray = np.array(magnitude_spectrum * 255, dtype = np.uint8)
meanThres = np.mean(imgDftGray)
_, imgDftGray = cv2.threshold(imgDftGray,meanThres, 255, cv2.THRESH_BINARY)
imgDftGray = cv2.cvtColor(imgDftGray, cv2.COLOR_GRAY2BGR)
return imgDftGray
def HOGcompute (self):
gray = self.gray.copy()
# h x w in pixels
cell_size = (8, 8)
# h x w in cells
block_size = (2, 2)
# number of orientation bins
nbins = 9
# Using OpenCV's HOG Descriptor
# winSize is the size of the image cropped to a multiple of the cell size
hog = cv2.HOGDescriptor(_winSize=(gray.shape[1] // cell_size[1] * cell_size[1],
gray.shape[0] // cell_size[0] * cell_size[0]),
_blockSize=(block_size[1] * cell_size[1],
block_size[0] * cell_size[0]),
_blockStride=(cell_size[1], cell_size[0]),
_cellSize=(cell_size[1], cell_size[0]),
_nbins=nbins)
# Create numpy array shape which we use to create hog_feats
n_cells = (gray.shape[0] // cell_size[0], gray.shape[1] // cell_size[1])
# We index blocks by rows first.
# hog_feats now contains the gradient amplitudes for each direction,
# for each cell of its group for each group. Indexing is by rows then columns.
hog_feats = hog.compute(gray).reshape(n_cells[1] - block_size[1] + 1,
n_cells[0] - block_size[0] + 1,
block_size[0], block_size[1], nbins).transpose((1, 0, 2, 3, 4))
# Create our gradients array with nbin dimensions to store gradient orientations
gradients = np.zeros((n_cells[0], n_cells[1], nbins))
# Create array of dimensions
cell_count = np.full((n_cells[0], n_cells[1], 1), 0, dtype=int)
# Block Normalization
for off_y in range(block_size[0]):
for off_x in range(block_size[1]):
gradients[off_y:n_cells[0] - block_size[0] + off_y + 1,
off_x:n_cells[1] - block_size[1] + off_x + 1] += \
hog_feats[:, :, off_y, off_x, :]
cell_count[off_y:n_cells[0] - block_size[0] + off_y + 1,
off_x:n_cells[1] - block_size[1] + off_x + 1] += 1
# Average gradients
gradients /= cell_count
# =============================================================================
# # Plot HOGs using Matplotlib
# # angle is 360 / nbins * direction
# print (gradients.shape)
#
# color_bins = 5
# plt.pcolor(gradients[:, :, color_bins])
# plt.gca().invert_yaxis()
# plt.gca().set_aspect('equal', adjustable='box')
# plt.colorbar()
# plt.show()
# cv2.destroyAllWindows()
# =============================================================================
return gradients
def goldenProportionOnCnts(self, numberOfCnts = 25, method = cv2.RETR_CCOMP, minArea = 2):
edgedForProp = self._edgeDetection( scalarFactor = 1, meanShift = 0, edgesdilateOpen = True, kernel = 3)
goldenPropImg = self.image.copy()
# create the contours from the segmented image
img2, contours, hierarchy = cv2.findContours(edgedForProp, method,
cv2.CHAIN_APPROX_SIMPLE)
innerCnts = []
for cnt, h in zip (contours, hierarchy[0]):
if h[2] == -1 :
innerCnts.append(cnt)
sortedContours = sorted(innerCnts, key = cv2.contourArea, reverse = True)
selectedContours = [cnt for cnt in sortedContours if cv2.contourArea(cnt) > minArea]
for cnt in selectedContours[0: numberOfCnts]:
cv2.drawContours(goldenPropImg, [cnt], -1, (255, 0, 255), 1)
# get all the ratio to check
ratioAreas = []
for index, cnt in enumerate(selectedContours[0: numberOfCnts]):
if index < len(selectedContours[0: numberOfCnts]) -1:
areaGoldenToCheck_previous = cv2.contourArea(selectedContours[index])
areaGoldenToCheck_next = cv2.contourArea(selectedContours[index + 1])
ratioArea = areaGoldenToCheck_previous / areaGoldenToCheck_next
ratioAreas.append(ratioArea)
meanAreaRatio = (np.mean(ratioAreas))
diffFromGoldenRatio = abs(1.618 - meanAreaRatio)
scoreProportionAreaVsGoldenRatio = np.exp(-diffFromGoldenRatio)
cv2.putText(goldenPropImg, "GoldPr: {:.3f}".format(scoreProportionAreaVsGoldenRatio), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.scoreProportionAreaVsGoldenRatio = scoreProportionAreaVsGoldenRatio
return goldenPropImg, scoreProportionAreaVsGoldenRatio
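# Worked example of the proportion score (illustrative numbers, not from the
# source): if the mean ratio between consecutive contour areas is 1.50,
# diffFromGoldenRatio = |1.618 - 1.50| = 0.118 and the score is
# exp(-0.118) ~= 0.89; a mean ratio of exactly 1.618 scores 1.0.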
def cornerDetectionVisualBalance (self, maxCorners = 40 , minDistance = 6, midlineOnCornersCnt = True):
# based on the idea that a balanced image shows a balanced distribution of corners;
# the mid axis is the midpoint of the extreme detected corners
corners = cv2.goodFeaturesToTrack(self.gray, maxCorners, 0.01, minDistance )
cornerimg = self.image.copy()
cornersOntheLeft = 0
cornersOntheRight = 0
cornersOnTop = 0
cornersOnBottom = 0
# find the limit x and y of the detected corners
listX = [corner[0][0] for corner in corners]
listY = [corner[0][1] for corner in corners]
minX = min(listX)
maxX = max (listX)
minY = min(listY)
maxY = max (listY)
for corner in corners:
x, y = corner[0]
x = int(x)
y = int(y)
if midlineOnCornersCnt:
# find the middle x and middle y
midx = minX + int((maxX - minX)/2)
midy = minY + int((maxY - minY)/2)
else:
midx = int(self.image.shape[1] / 2)
midy = int(self.image.shape[0] / 2)
cv2.rectangle(cornerimg,(x-2,y-2),(x+2,y+2),(0,255,0), 1)
if x < midx:
cornersOntheLeft += 1
if x > midx:
cornersOntheRight += 1
if y < midy:
cornersOnTop += 1
if y > midy:
cornersOnBottom += 1
scoreHorizzontalCorners = np.exp(-(abs(cornersOntheLeft - cornersOntheRight )/(maxCorners/3.14)))
scoreVerticalCorners = np.exp(-(abs(cornersOnTop - cornersOnBottom )/(maxCorners/3.14)))
cv2.putText(cornerimg, "Corn H: {:.3f} V: {:.3f}".format(scoreHorizzontalCorners, scoreVerticalCorners), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.cornersBalance = (scoreHorizzontalCorners + scoreVerticalCorners) / 2
return cornerimg, scoreHorizzontalCorners, scoreVerticalCorners
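# Worked example of the corner-balance score (illustrative numbers, not from
# the source): with maxCorners = 40 the denominator is 40 / 3.14 ~= 12.7, so
# 25 corners on the left vs 15 on the right gives
# exp(-|25 - 15| / 12.7) = exp(-0.785) ~= 0.46, while a perfect split scores 1.0.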
def goldenSpiralAdaptedDetection (self, displayall = False , displayKeypoints = True, maxKeypoints = 100, edged = True):
goldenImgDisplay = self.image.copy()
# segmentation with orb and edges
ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = maxKeypoints, edged = edged, edgesdilateOpen = False, method = cv2.RETR_EXTERNAL)
# find the center zig-zag orb silhouette
copyZigZag, ratioGoldenRectangleZigZagOrb , sorted_contoursZigZag, zigzagPerimeterScore= self._zigzagCntsArea()
#draw the bounding box
c = max(sorted_contoursZigZag, key=cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
if x==0 or x+w == self.image.shape[1] or y==0 or y+h == self.image.shape[0]:
cv2.rectangle(goldenImgDisplay, (0,0), (self.image.shape[1], self.image.shape[0]), (0,255,0), 1)
else:
cv2.rectangle(goldenImgDisplay,(x,y),(x+w,y+h),(0,255,0),1)
# create the guidelines
im, im2,im3, im4 = self._drawGoldenSpiral(drawRectangle=False, drawEllipses = True, x = w, y = h)
transX = x
transY = y
T = np.float32([[1,0,transX], [0,1, transY]])
imTranslated = cv2.warpAffine(im, T, (self.image.shape[1], self.image.shape[0]))
T2 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, -self.image.shape[0] + transY + h]])
imTranslated2 = cv2.warpAffine(im2, T2, (self.image.shape[1], self.image.shape[0]))
T3 = np.float32([[1,0, transX], [0,1, -self.image.shape[0] + transY + h]])
imTranslated3 = cv2.warpAffine(im3, T3, (self.image.shape[1], self.image.shape[0]))
T4 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, transY ]])
imTranslated4 = cv2.warpAffine(im4, T4, (self.image.shape[1], self.image.shape[0]))
# OR the guidelines into one display img
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated2)
if displayall:
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated3)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated4)
if displayKeypoints:
goldenImgDisplay = cv2.drawKeypoints(goldenImgDisplay, keypoints,goldenImgDisplay, flags =
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
# dilate the spirals
kernel = np.ones((5,5),np.uint8)
imTranslated = cv2.dilate(imTranslated,kernel,iterations = 3)
imTranslated2 = cv2.dilate(imTranslated2,kernel,iterations = 3)
imTranslated3 = cv2.dilate(imTranslated3,kernel,iterations = 3)
imTranslated4 = cv2.dilate(imTranslated4,kernel,iterations = 3)
# loop to collect the intersection
intersection = cv2.bitwise_and(ImgImpRegion,imTranslated)
intersection2 = cv2.bitwise_and(ImgImpRegion,imTranslated2)
intersection3 = cv2.bitwise_and(ImgImpRegion,imTranslated3)
intersection4 = cv2.bitwise_and(ImgImpRegion,imTranslated4)
# sum of imgImpRegion
sumOfAllPixelInImgImpRegion = (ImgImpRegion>0).sum()
# sum of all intersections
sum1 = (intersection>0).sum()
sum2 = (intersection2>0).sum()
sum3 = (intersection3>0).sum()
sum4 = (intersection4>0).sum()
maxSumIntersection = max(sum1, sum2, sum3, sum4)
# calculate the ratio of the max vs whole
scoreSpiralGoldenRatio = maxSumIntersection / sumOfAllPixelInImgImpRegion
cv2.putText(goldenImgDisplay, "Gold: {:.3f}".format(scoreSpiralGoldenRatio), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.scoreSpiralGoldenRatio = scoreSpiralGoldenRatio
# =============================================================================
# cv2.imshow('ImgImpRegion', ImgImpRegion)
# cv2.imshow('imTranslated', imTranslated)
# cv2.imshow('inter', intersection)
# cv2.waitKey()
# cv2.destroyAllWindows()
# =============================================================================
return goldenImgDisplay, scoreSpiralGoldenRatio
def goldenSpiralFixDetection (self, displayall = False , displayKeypoints = True, maxKeypoints = 100, edged = True, numberOfCnts = 40, scaleFactor = 0.5, bonus = 10):
#goldenImgDisplay = self.image.copy()
# segmentation with orb and edges
ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = maxKeypoints, edged = edged, edgesdilateOpen = False, method = cv2.RETR_EXTERNAL)
# implement the segmentation including the edges
edgedImg = self._edgeDetection(scalarFactor = 1, meanShift = 0, edgesdilateOpen = False, kernel = 5)
edgedImg = cv2.cvtColor(edgedImg, cv2.COLOR_GRAY2BGR)
# give a weight to the edges detection smaller than the orb
#edgedImg[np.where((edgedImg ==[255,255,255]).all(axis=2))] = [255,255,255]
# implement with inner shape
segmentationOnInnerCnts, contours = self._innerCntsSegmentation(numberOfCnts = numberOfCnts, method = cv2.RETR_CCOMP, minArea = 5)
segmentationOnInnerCnts[np.where((segmentationOnInnerCnts ==[255,255,255]).all(axis=2))] = [40,40,40]
# merge the masks
ImgImpRegion = cv2.bitwise_or(ImgImpRegion,edgedImg)
ImgImpRegion = cv2.bitwise_or(ImgImpRegion,segmentationOnInnerCnts)
goldenImgDisplay = ImgImpRegion.copy()
# =============================================================================
# # find the center zig zag orb silhoutte
# copyZigZag, ratioGoldenRectangleZigZagOrb , sorted_contoursZigZag, zigzagPerimeterScore= self._zigzagCntsArea()
#
# #draw the bounding box
# c = max(sorted_contoursZigZag, key=cv2.contourArea)
# x,y,w,h = cv2.boundingRect(c)
# =============================================================================
# set this way to make the bounding box the size of the frame; for the adaptive version, unmask the block above and adjust
x=0
y=0
w = self.image.shape[1]
h = self.image.shape[0]
if x==0 or x+w == self.image.shape[1] or y==0 or y+h == self.image.shape[0]:
cv2.rectangle(goldenImgDisplay, (0,0), (self.image.shape[1], self.image.shape[0]), (0,255,0), 1)
else:
cv2.rectangle(goldenImgDisplay,(x,y),(x+w,y+h),(0,255,0),1)
# create the guidelines
im, im2,im3, im4 = self._drawGoldenSpiral(drawRectangle=False, drawEllipses = True, x = w, y = h)
transX = x
transY = y
T = np.float32([[1,0,transX], [0,1, transY]])
imTranslated = cv2.warpAffine(im, T, (self.image.shape[1], self.image.shape[0]))
T2 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, -self.image.shape[0] + transY + h]])
imTranslated2 = cv2.warpAffine(im2, T2, (self.image.shape[1], self.image.shape[0]))
T3 = np.float32([[1,0, transX], [0,1, -self.image.shape[0] + transY + h]])
imTranslated3 = cv2.warpAffine(im3, T3, (self.image.shape[1], self.image.shape[0]))
T4 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, transY ]])
imTranslated4 = cv2.warpAffine(im4, T4, (self.image.shape[1], self.image.shape[0]))
# dilate the spirals
kernel = np.ones((5,5),np.uint8)
AimTranslated = cv2.dilate(imTranslated,kernel,iterations = 3)
AimTranslated2 = cv2.dilate(imTranslated2,kernel,iterations = 3)
AimTranslated3 = cv2.dilate(imTranslated3,kernel,iterations = 3)
AimTranslated4 = cv2.dilate(imTranslated4,kernel,iterations = 3)
# loop to collect the intersection
intersection = cv2.bitwise_and(ImgImpRegion,AimTranslated)
intersection2 = cv2.bitwise_and(ImgImpRegion,AimTranslated2)
intersection3 = cv2.bitwise_and(ImgImpRegion,AimTranslated3)
intersection4 = cv2.bitwise_and(ImgImpRegion,AimTranslated4)
# sum of imgImpRegion
sumOfAllPixelInSilhoutte = (ImgImpRegion > 0).sum()
sumofAlledgedandorb = (ImgImpRegion==255).sum()
sumofAllInnerCnts = (ImgImpRegion==40).sum()
sumOfAllPixelInImgImpRegion = sumofAlledgedandorb + (scaleFactor* sumofAllInnerCnts)
# sum of all intersections
sum1_orb = (intersection==255).sum()
sum2_orb = (intersection2==255).sum()
sum3_orb = (intersection3==255).sum()
sum4_orb = (intersection4==255).sum()
# for the inner shape
sum1_inn = (intersection==40).sum()
sum2_inn = (intersection2==40).sum()
sum3_inn = (intersection3==40).sum()
sum4_inn = (intersection4==40).sum()
# weight
sum1 = sum1_orb * bonus + (scaleFactor * sum1_inn)
sum2 = sum2_orb * bonus + (scaleFactor * sum2_inn)
sum3 = sum3_orb * bonus + (scaleFactor * sum3_inn)
sum4 = sum4_orb * bonus + (scaleFactor * sum4_inn)
maxSumIntersection = max(sum1, sum2, sum3, sum4)
# calculate the ratio of the max vs the whole, weighted by the overall area of the silhouette compared to the size of the frame
scoreSpiralGoldenRatio = maxSumIntersection / sumOfAllPixelInImgImpRegion * (sumOfAllPixelInSilhoutte / self.gray.size)
cv2.putText(goldenImgDisplay, "Gold: {:.3f}".format(scoreSpiralGoldenRatio), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.scoreSpiralGoldenRatio = scoreSpiralGoldenRatio
# OR the guidelines into one display img
if displayall == False:
if sum1 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated)
if sum2 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated2)
if sum3 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated3)
if sum4 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated4)
if displayall:
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated2)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated3)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated4)
if displayKeypoints:
goldenImgDisplay = cv2.drawKeypoints(goldenImgDisplay, keypoints,goldenImgDisplay, flags =
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
return goldenImgDisplay, scoreSpiralGoldenRatio
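# Worked example of the weighted spiral score (illustrative numbers, not from
# the source): with bonus = 10 and scaleFactor = 0.5, an orb/edge intersection
# of 1,000 px and an inner-contour intersection of 2,000 px give
# sum = 1000 * 10 + 0.5 * 2000 = 11,000; dividing by the weighted total and
# multiplying by the silhouette coverage (silhouette pixels / frame pixels)
# keeps sparse silhouettes from dominating the score.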
def displayandScoreExtremePoints(self, numberOfCnts = 20):
blur = cv2.GaussianBlur(self.gray, (5, 5), 0)
mean = np.mean(blur)
# threshold the image, then perform a series of erosions +
# dilations to remove any small regions of noise
thresh = cv2.threshold(blur, mean, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=2)
# cut the border for more precision
thresh[0:2, :] = 0
thresh[-2:self.image.shape[0], :] = 0
thresh[:, 0:2] = 0
thresh[:, -2:self.image.shape[1]] = 0
# find contours in the thresholded image, then keep the largest ones
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[1]
sorted_contours = sorted(cnts, key=cv2.contourArea, reverse = True)
# =============================================================================
# c = max(cnts, key=cv2.contourArea)
#
# # determine the most extreme points along the contour
# extLeft = tuple(c[c[:, :, 0].argmin()][0])
# extRight = tuple(c[c[:, :, 0].argmax()][0])
# extTop = tuple(c[c[:, :, 1].argmin()][0])
# extBot = tuple(c[c[:, :, 1].argmax()][0])
# =============================================================================
extLeftList=[]
extRightList = []
extTopList = []
extBotList =[]
for c in sorted_contours[0:numberOfCnts]:
# determine the most extreme points along the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
extLeftList.append(extLeft)
extRightList.append(extRight)
extTopList.append(extTop)
extBotList.append(extBot)
# sort the list of tuple by x
extLeftListSorted = sorted(extLeftList, key=lambda x: x[0])
extRightListSorted = sorted(extRightList, key=lambda x: x[0], reverse = True)
extTopListSorted = sorted(extTopList, key=lambda x: x[1])
extBotListSorted = sorted(extBotList, key=lambda x: x[1], reverse = True)
extLeft = extLeftListSorted[0]
extRight = extRightListSorted[0]
extTop = extTopListSorted[0]
extBot = extBotListSorted[0]
# draw the outline of the object, then draw each of the
# extreme points, where the left-most is red, right-most
# is green, top-most is blue, and bottom-most is teal
image = self.image.copy()
for c in sorted_contours[0:numberOfCnts]:
cv2.drawContours(image, [c], -1, (0, 255, 255), 2)
cv2.circle(image, extLeft, 5, (0, 0, 255), -1)
cv2.circle(image, extRight, 5, (0, 255, 0), -1)
cv2.circle(image, extTop, 5, (255, 0, 0), -1)
cv2.circle(image, extBot, 5, (255, 255, 0), -1)
# calculate the Euclidean distance from each half-side point
leftHalfSidePoint = (0, int(self.image.shape[0]/2))
rightHalfSidePoint =(self.image.shape[1], int(self.image.shape[0]/2))
topHalfSidePoint = (int(self.image.shape[1] /2), 0)
bottomHalfSidePoint = (int(self.image.shape[1] /2), self.image.shape[0])
cv2.circle(image, leftHalfSidePoint, 3, (0, 0, 255), -1)
cv2.circle(image, rightHalfSidePoint, 3, (0, 0, 255), -1)
cv2.circle(image, topHalfSidePoint, 3, (0, 0, 255), -1)
cv2.circle(image, bottomHalfSidePoint, 3, (0, 0, 255), -1)
#halfHight = int(self.image.shape[0]/2)
dist01 = dist.euclidean(extLeft, leftHalfSidePoint)
dist02 = dist.euclidean(extRight, rightHalfSidePoint)
#meanDistA = int((dist01 + dist02)/2 )
#scoreA = meanDistA / halfHight
#halfwidth = int(self.image.shape[1]/2)
dist03 = dist.euclidean(extTop, topHalfSidePoint)
dist04 = dist.euclidean(extBot, bottomHalfSidePoint)
#meanDistB = int((dist03 + dist04)/2 )
#scoreB = meanDistB / halfwidth
#meanScore = (scoreA + scoreB)/2
#scoreMeanDistanceOppositeToHalfSide = np.exp(-meanScore*1.9) # used with mean on negative
if extLeft[1] < (self.image.shape[0] / 2):
DistExtLeft = - dist01
else:
DistExtLeft = dist01
if extRight[1] < (self.image.shape[0] / 2):
DistExtRight = - dist02
else:
DistExtRight = dist02
if extTop[0] < (self.image.shape[1] / 2):
DistExtTop = - dist03
else:
DistExtTop = dist03
if extBot[0] < (self.image.shape[1] / 2):
DistExtBot = - dist04
else:
DistExtBot = dist04
# make the script independent of the size of the image
if self.image.shape[1]> self.image.shape[0]:
ratio = self.image.shape[1]
else:
ratio = self.image.shape[0]
DistExtLeftToHalf = DistExtLeft / ratio
DistExtRightToHalf = DistExtRight / ratio
DistExtTopToHalf = DistExtTop / ratio
DistExtBotToHalf = DistExtBot / ratio
# =============================================================================
# if self.image.shape[1]> self.image.shape[0]:
# ratio = self.image.shape[1] / self.image.shape[0]
# else:
# ratio = self.image.shape[0] / self.image.shape[1]
#
# meanNeg = (DistExtLeft + DistExtRight + DistExtTop + DistExtBot) / 4
# scoreMeanDistanceOppositeToHalfSideadapted = np.exp(-meanNeg / (ratio * 10))
# =============================================================================
#cv2.putText(image, "Epts: {:.3f}".format(scoreMeanDistanceOppositeToHalfSideadapted), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
# =============================================================================
#
# self.scoreMeanDistanceOppositeToHalfSide = scoreMeanDistanceOppositeToHalfSideadapted
# =============================================================================
# distances from borders:
DistLeftBorder = abs(extLeft[0] - 0)
DistRightBorder = abs(extRight[0] - self.image.shape[1])
DistTopBorder = abs(extTop[1] - 0)
DistBotBorder = abs(extBot[1] - self.image.shape[0])
# make it independent of the image size by normalising by the length of the related frame side
DistLeftBorder = DistLeftBorder / (self.image.shape[1])
DistRightBorder = DistRightBorder / (self.image.shape[1])
DistTopBorder = DistTopBorder / (self.image.shape[0])
DistBotBorder = DistBotBorder / (self.image.shape[0])
return image, DistExtLeftToHalf,DistExtRightToHalf,DistExtTopToHalf, DistExtBotToHalf, DistLeftBorder, DistRightBorder, DistTopBorder, DistBotBorder
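# Sign convention sketch (illustrative, not from the source): an extreme left
# point above the horizontal midline yields a negative DistExtLeft, one below
# yields a positive value, so the returned *ToHalf distances encode on which
# side of the frame's midlines each extreme point falls.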
def vertAndHorizLinesBalance (self):
edges = self._edgeDetection(scalarFactor = 1, meanShift = 0, edgesdilateOpen = False)
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 15, minLineLength=100, maxLineGap=1)
copyImg = self.image.copy()
verticalLines = 0
horizontalLines = 0
verticalLinesLeft = 0
verticalLinesRight = 0
horizontalLinesLeft = 0
horizontalLinesRight = 0
allX = []
for line in lines:
x1, y1, x2, y2 = line[0]
allX.append(x1)
allX.append(x2)
# the mid x is computed from the detected line endpoints so balance is measured around the detected content
midX = int((max(allX) - min(allX))/2) + min(allX)
for line in lines:
x1, y1, x2, y2 = line[0]
angle = np.arctan2(y2 - y1, x2-x1) *180 / np.pi
# horizontal lines
if angle == 0:
cv2.line(copyImg, (x1, y1), (x2, y2), (0,0,255), 1)
horizontalLines += 1
if x1 < midX and x2 < midX:
horizontalLinesLeft += 1
if x1 > midX and x2 > midX:
horizontalLinesRight += 1
# vertical lines
if angle == 90 or angle == -90 :
cv2.line(copyImg, (x1, y1), (x2, y2), (0,255,0), 1)
verticalLines += 1
if x1 < midX and x2 < midX:
verticalLinesLeft += 1
if x1 > midX and x2 > midX:
verticalLinesRight += 1
diffVerticals = abs(verticalLinesLeft - verticalLinesRight)
diffHorizontal = abs(horizontalLinesLeft -horizontalLinesRight )
if verticalLines == 0 or horizontalLines == 0:
verticalLinesBalanceScore = 0
horizontalLinesBalanceScore = 0
cv2.putText(copyImg, "Lines V: {:.3f} H: {:.3f}".format(verticalLinesBalanceScore,horizontalLinesBalanceScore), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.verticalandHorizBalanceMean = (verticalLinesBalanceScore + horizontalLinesBalanceScore) / 2
return copyImg, verticalLinesBalanceScore, horizontalLinesBalanceScore
else:
verticalLinesBalanceScore = (1 - (diffVerticals/verticalLines))
horizontalLinesBalanceScore = (1 - (diffHorizontal / horizontalLines))
cv2.putText(copyImg, "Lines V: {:.3f} H: {:.3f}".format(verticalLinesBalanceScore,horizontalLinesBalanceScore), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.verticalandHorizBalanceMean = (verticalLinesBalanceScore + horizontalLinesBalanceScore) / 2
return copyImg, verticalLinesBalanceScore, horizontalLinesBalanceScore
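# Worked example of the line-balance scores (illustrative numbers, not from
# the source): 6 vertical lines left of midX and 4 to the right, out of 10
# vertical lines in total, give verticalLinesBalanceScore = 1 - |6 - 4| / 10 = 0.8;
# a perfectly even split scores 1.0 and a fully one-sided distribution scores 0.0.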
def numOfTangentandBalance (self):
edges = self._edgeDetection(scalarFactor = 1, meanShift = 0, edgesdilateOpen = False)
# first template
template = np.zeros((16,16), np.uint8)
template[7:9,0:16] = 255
template[0:16, 7:9] = 255
# w and h to also use later to draw the rectangles
w, h = template.shape[::-1]
# rotated template
M = cv2.getRotationMatrix2D((w/2,h/2),15,1)
template15 = cv2.warpAffine(template,M,(w,h))
template30 = cv2.warpAffine(template15,M,(w,h))
template45 = cv2.warpAffine(template30,M,(w,h))
template60 = cv2.warpAffine(template45,M,(w,h))
template75 = cv2.warpAffine(template60,M,(w,h))
# run the matchtemplate
result = cv2.matchTemplate(edges, template, cv2.TM_CCOEFF)
result15 = cv2.matchTemplate(edges, template15, cv2.TM_CCOEFF)
result30 = cv2.matchTemplate(edges, template30, cv2.TM_CCOEFF)
result45 = cv2.matchTemplate(edges, template45, cv2.TM_CCOEFF)
result60 = cv2.matchTemplate(edges, template60, cv2.TM_CCOEFF)
result75 = cv2.matchTemplate(edges, template75, cv2.TM_CCOEFF)
# find the match locations (loc names follow the template rotation angles)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
threshold = max_val * 0.96
loc = np.where(result >= threshold)
loc15 = np.where(result15 >= threshold)
loc30 = np.where(result30 >= threshold)
loc45 = np.where(result45 >= threshold)
loc60 = np.where(result60 >= threshold)
loc75 = np.where(result75 >= threshold)
#convert edges for display
edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
points = []
for locations in (loc, loc15, loc30, loc45, loc60, loc75):
for pt in zip(*locations[::-1]):
cv2.rectangle(edges, pt, (pt[0] + w, pt[1] + h), (255,0,255), 1)
points.append(pt)
score = np.exp(- len(points) )
# =============================================================================
# leftCount = 0
# rightCount = 0
# for p in points:
# if p[0] < self.image.shape[0]:
# leftCount += 1
# else:
# rightCount += 1
# =============================================================================
cv2.putText(edges, "Cross: {:.3f}".format(score), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
#self.crossDetectionbalance = score
return edges , score
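# Worked example of the crossing score (illustrative numbers, not from the
# source): zero template matches give exp(0) = 1.0, while three detected
# crossings give exp(-3) ~= 0.05, so the score drops off very quickly.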
def numberEdgesConvexCnt (self, minArea = True,
numberOfCnts = 8, areascalefactor = 1000 ):
HullimgCopy = self.image.copy()
gray = cv2.cvtColor(HullimgCopy,cv2.COLOR_BGR2GRAY)
meanThresh = np.mean(gray)
ret,thresh = cv2.threshold(gray, meanThresh, 255, cv2.THRESH_BINARY)
img2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# cut the border for more precision
thresh[0:2, :] = 0
thresh[-2:self.image.shape[0], :] = 0
thresh[:, 0:2] = 0
thresh[:, -2:self.image.shape[1]] = 0
# sort contours by area
if minArea == False:
sorted_contours = sorted(contours, key=cv2.contourArea, reverse = True)
# sorted and selected list of areas in contours if minArea is True
if minArea:
selected_contours = []
minArea = self.gray.size / areascalefactor
for cnt in contours:
area = cv2.contourArea(cnt)
if area > minArea:
selected_contours.append(cnt)
sorted_contours = sorted(selected_contours, key = cv2.contourArea, reverse = True)
# select only the bigger contours
contoursSelection = sorted_contours[0:numberOfCnts]
# mask creation
blankHull = np.zeros((self.image.shape[0], self.image.shape[1], 3), np.uint8)
listofHullsPoints = []
for cnt in contoursSelection:
hull = cv2.convexHull(cnt)
cv2.drawContours(HullimgCopy, [hull], -1, (0,255,0),1)
cv2.drawContours(blankHull, [hull], -1, (255,255,255),-1)
for coord in hull:
listofHullsPoints.append(coord[0])
# sort the points on the hull by the x coordinates
listofHullsPointsSorted = sorted(listofHullsPoints, key=lambda x: x[0])
#extRightListSorted = sorted(extRightList, key=lambda x: x[0], reverse = True)
firstPointLeft = (listofHullsPointsSorted[0][0], listofHullsPointsSorted[0][1])
secondtPointLeft = (listofHullsPointsSorted[1][0], listofHullsPointsSorted[1][1])
thirdPointLeft = (listofHullsPointsSorted[2][0], listofHullsPointsSorted[2][1])
firstPointRight = (listofHullsPointsSorted[-1][0], listofHullsPointsSorted[-1][1])
secondtPointRight = (listofHullsPointsSorted[-2][0], listofHullsPointsSorted[-2][1])
thirdPointRight = (listofHullsPointsSorted[-3][0], listofHullsPointsSorted[-3][1])
# draw the points on the image for visualisation purposes
cv2.circle(HullimgCopy, firstPointLeft, 5, (0, 0, 255), -1)
cv2.circle(HullimgCopy, secondtPointLeft, 5, (0, 255, 0), -1)
cv2.circle(HullimgCopy, thirdPointLeft, 5, (0, 255, 0), -1)
cv2.circle(HullimgCopy, firstPointRight, 5, (0, 0, 255), -1)
cv2.circle(HullimgCopy, secondtPointRight, 5, (0, 255, 0), -1)
cv2.circle(HullimgCopy, thirdPointRight, 5, (0, 255, 0), -1)
# we only need the y coordinate, since the x-sorting already determines which point comes first, second and third (we focus on the slope here),
# normalised by the height to make it independent of the image size
firstPointLeftY = firstPointLeft[1] / self.image.shape[0]
secondtPointLeftY = secondtPointLeft[1] / self.image.shape[0]
thirdPointLeftY = thirdPointLeft[1] / self.image.shape[0]
firstPointRightY = firstPointRight[1] / self.image.shape[0]
secondtPointRightY = secondtPointRight[1] / self.image.shape[0]
thirdPointRightY = thirdPointRight[1] / self.image.shape[0]
#left mask and right mask
x = self.gray.shape[1]
blankHullLeft = blankHull.copy()
blankHullRight = blankHull.copy()
blankHullLeft[ : , 0: int(x/2)] = 0
blankHullRight[ : , int(x/2): x ] = 0
totalWhiteinHull = (blankHull > 0).sum()
totalWhiteinLeft = (blankHullLeft > 0).sum()
totalWhiteInRight = (blankHullRight > 0).sum()
#calculate the score for negative space balance
alpha = 3.14
scoreHullBalance = np.exp(-(abs(totalWhiteinLeft - totalWhiteInRight ) / totalWhiteinHull) * 1.618 * alpha )
self.scoreHullBalance = scoreHullBalance
cv2.putText(HullimgCopy, "NegBal: {:.3f}".format(scoreHullBalance), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
return HullimgCopy, scoreHullBalance , firstPointLeftY,secondtPointLeftY,thirdPointLeftY,firstPointRightY,secondtPointRightY,thirdPointRightY
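# Worked example of the negative-space balance score (illustrative numbers,
# not from the source): with 60k white hull pixels on the left, 40k on the
# right and 100k in total, the imbalance ratio is 0.2 and the score is
# exp(-0.2 * 1.618 * 3.14) = exp(-1.016) ~= 0.36; equal halves score 1.0.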
def rectangularComposition (self, segmentation = 'ORB'):
# calculate the template rectangle based on the golden ratio
h, w, s = self.image.shape
upLeftX = int((w - (w*0.618)) / 2)
upLeftY = int((h - (h*0.618)) / 2)
baseRightX = upLeftX + int(w* 0.618)
baseRightY = upLeftY + int(h * 0.618)
# draw the rect mask
blankForMasking = np.zeros(self.image.shape, dtype = "uint8")
rectangularMask = cv2.rectangle(blankForMasking,(upLeftX,upLeftY),(baseRightX,baseRightY), (255, 255, 255), 2)
# segmentation using
if segmentation == 'ORB' :
ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL)
if segmentation == 'saliency':
contours, ImgImpRegion = self._saliencySegmentation(method = cv2.RETR_EXTERNAL )
ImgImpRegion = cv2.cvtColor(ImgImpRegion, cv2.COLOR_GRAY2BGR)
# create the segmentations of the images using threshold
if segmentation == 'thresh':
contours, ImgImpRegion = self._thresholdSegmentation(method = cv2.RETR_LIST )
if segmentation == 'both':
ImgImpRegionA, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL)
contours, ImgImpRegionB = self._saliencySegmentation(method = cv2.RETR_EXTERNAL )
ImgImpRegionB = cv2.cvtColor(ImgImpRegionB, cv2.COLOR_GRAY2BGR)
# create the both mask
ImgImpRegion = cv2.bitwise_or(ImgImpRegionA,ImgImpRegionB)
# dilate the mask to capture more relevant pixels
kernel = np.ones((5,5),np.uint8)
rectangularMask = cv2.dilate(rectangularMask,kernel,iterations = 3)
# count the total number of segmentation pixel bigger than 0
maskedImage = cv2.bitwise_and(ImgImpRegion, rectangularMask)
sumOfrelevantPixels = (maskedImage > 0).sum()
totalRelevantPixels = (ImgImpRegion > 0).sum()
# ratio of the relevant pixels on the rectangle band to all relevant pixels
rectCompScore = sumOfrelevantPixels/totalRelevantPixels
# draw the image for display
cv2.putText(ImgImpRegion, "RectComp: {:.3f}".format(rectCompScore), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
rectangularMask = cv2.rectangle(ImgImpRegion,(upLeftX,upLeftY),(baseRightX,baseRightY), (255, 255, 255), 2)
self.rectCompScore = rectCompScore
return ImgImpRegion, rectCompScore
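# Worked example of the golden-section framing (illustrative numbers, not from
# the source): for a 400 x 300 image the inner rectangle spans
# upLeftX = int((400 - 400*0.618) / 2) = 76 to baseRightX = 76 + 247 = 323,
# i.e. a centred band covering 61.8% of each dimension; rectCompScore is the
# fraction of segmented pixels that fall on this (dilated) band.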
def circleComposition (self, segmentation = 'ORB'):
# calculate the template ellipse based on the golden ratio
h, w, s = self.image.shape
# draw the ellipse mask
blankForMasking = np.zeros(self.image.shape, dtype = "uint8")
ellipseMask = cv2.ellipse(blankForMasking, (int(w/2),int(h/2)), (int(w*0.618/2), int(h*0.618/2)),0,0,360,(255, 255, 255), 2)
# segmentation using
if segmentation == 'ORB' :
ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL)
if segmentation == 'saliency':
contours, ImgImpRegion = self._saliencySegmentation(method = cv2.RETR_EXTERNAL )
ImgImpRegion = cv2.cvtColor(ImgImpRegion, cv2.COLOR_GRAY2BGR)
# create the segmentations of the images using threshold
if segmentation == 'thresh':
contours, ImgImpRegion = self._thresholdSegmentation(method = cv2.RETR_LIST )
if segmentation == 'both':
ImgImpRegionA, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL)
contours, ImgImpRegionB = self._saliencySegmentation(method = cv2.RETR_EXTERNAL )
ImgImpRegionB = cv2.cvtColor(ImgImpRegionB, cv2.COLOR_GRAY2BGR)
# create the both mask
ImgImpRegion = cv2.bitwise_or(ImgImpRegionA,ImgImpRegionB)
# dilate the mask to capture more relevant pixels
kernel = np.ones((5,5),np.uint8)
ellipseMask = cv2.dilate(ellipseMask,kernel,iterations = 2)
# count the total number of segmentation pixel bigger than 0
maskedImage = cv2.bitwise_and(ImgImpRegion, ellipseMask)
sumOfrelevantPixels = (maskedImage > 0).sum()
totalRelevantPixels = (ImgImpRegion > 0).sum()
# ratio of the relevant pixels on the ellipse band to all relevant pixels
circleCompScore = sumOfrelevantPixels/totalRelevantPixels
# draw the image for display
cv2.putText(ImgImpRegion, "circleComp: {:.3f}".format(circleCompScore), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
cv2.ellipse(ImgImpRegion, (int(w/2),int(h/2)), (int(w*0.618/2), int(h*0.618/2)),0,0,360,(255, 255, 255), 2)
self.circleCompScore = circleCompScore
return ImgImpRegion, circleCompScore
def fourTriangleCompositionAdapted(self, segmentation = 'inner', minArea = True,
numberOfCnts = 100, areascalefactor = 2000, distanceMethod = 'segment'):
FourTriangleImg = self.image.copy()
# draw the lines of the diagonal
topLeft = (0,0)
lowerRight = (self.image.shape[1], self.image.shape[0])
lowerLeft = (0, self.image.shape[0])
topright = (self.image.shape[1],0)
#blankFourTriangle = np.array(blankFourTriangle)
cv2.line(FourTriangleImg, topright , lowerLeft, (255,255,255), 1) # topright - lowerleft
cv2.line(FourTriangleImg, topLeft , lowerRight, (255,0,255), 1) # topleft - lowerright
# draw the two perpendicular lines
leftIntersectionX, leftIntersectionY = self._find_perpendicular_through_point_to_line(lowerLeft[0], lowerLeft[1],topright[0],topright[1], topLeft[0], topLeft[1] )
cv2.line(FourTriangleImg, topLeft , (int(leftIntersectionX), int(leftIntersectionY) ), (255,255,255), 1)
rightIntersectionX, righttIntersectionY = self._find_perpendicular_through_point_to_line(lowerLeft[0], lowerLeft[1],topright[0],topright[1], lowerRight[0], lowerRight[1] )
cv2.line(FourTriangleImg, lowerRight , (int(rightIntersectionX), int(righttIntersectionY) ), (255,255,255), 1)
# second
leftIntersectionXB, leftIntersectionYB = self._find_perpendicular_through_point_to_line(topLeft[0], topLeft[1],lowerRight[0],lowerRight[1], lowerLeft[0], lowerLeft[1] )
cv2.line(FourTriangleImg, lowerLeft , (int(leftIntersectionXB), int(leftIntersectionYB) ), (255,0,255), 1)
rightIntersectionXB, righttIntersectionYB = self._find_perpendicular_through_point_to_line(topLeft[0], topLeft[1],lowerRight[0],lowerRight[1], topright[0], topright[1] )
cv2.line(FourTriangleImg, topright , (int(rightIntersectionXB), int(righttIntersectionYB) ), (255,0,255), 1)
# calculate the segmentation
if segmentation == 'ORB' :
blank, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = False, edgesdilateOpen = False, method = cv2.RETR_EXTERNAL)
if segmentation == 'saliency':
contours, SaliencyMask = self._saliencySegmentation(method = cv2.RETR_EXTERNAL )
# create the segmentations of the images using threshold
if segmentation == 'thresh':
contours, threshImg = self._thresholdSegmentation(method = cv2.RETR_LIST )
if segmentation == 'inner':
segmentationOnInnerCnts, contours = self._innerCntsSegmentation(numberOfCnts = numberOfCnts, method = cv2.RETR_CCOMP, minArea = 2)
# sort contours
sorted_contours = sorted(contours, key = cv2.contourArea, reverse = True)
# sorted and selected list of areas in contours if minArea is True
if minArea:
selected_contours = []
minArea = self.gray.size / areascalefactor
for cnt in sorted_contours[0:numberOfCnts]:
area = cv2.contourArea(cnt)
if area > minArea:
selected_contours.append(cnt)
sorted_contours = sorted(selected_contours, key = cv2.contourArea, reverse = True)
# select only the bigger contours
contoursSelection = sorted_contours[0:numberOfCnts]
# find the center of each contours and draw cnts, not using approx contours
imageDisplay, listOfCenterPoints = self._findCentreOfMass(image = FourTriangleImg, contours = contoursSelection, approxCnt = False)
# calculate the distance from the center points to the rule-of-thirds points (as in the paper)
# min distance of each center to the 4 points
distances_option_1 = []
distances_option_2 = []
if distanceMethod == 'segment':
for point in listOfCenterPoints:
centerPoint = np.asarray(point)
topLeftA = np.asarray((topLeft[0], topLeft[1]))
lowerRightA = np.asarray((lowerRight[0], lowerRight[1]))
lowerLeftA = np.asarray((lowerLeft[0], lowerLeft[1]))
toprightA = np.asarray((topright[0], topright[1]))
leftIntersectionPointA = np.asarray((leftIntersectionX, leftIntersectionY))
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import streakline
#import streakline2
import myutils
import ffwd
from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior
#import streams
import astropy
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
import astropy.coordinates as coord
import gala.coordinates as gc
import scipy.linalg as la
import scipy.integrate
import scipy.interpolate
import scipy.optimize
import zscale
import itertools
import copy
import pickle
# observers
# defaults taken as in astropy v2.0 icrs
mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
MASK = -9999
pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
#pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
class Stream():
def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'):
"""Initialize """
setup = {}
if progenitor['coords']=='galactocentric':
setup['x0'] = x0
setup['v0'] = v0
elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0):
if progenitor['pm_polar']:
a = v0[1].value
phi = v0[2].value
v0[1] = a*np.sin(phi)*u.mas/u.yr
v0[2] = a*np.cos(phi)*u.mas/u.yr
# convert positions
xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer'])
xgal = xeq.transform_to(coord.Galactocentric)
setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc
# convert velocities
setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun)
#setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s
else:
raise ValueError('Observer position needed!')
setup['dr'] = dr
setup['dv'] = dv
setup['minit'] = minit
setup['mfinal'] = mfinal
setup['rcl'] = rcl
setup['dt'] = dt
setup['age'] = age
setup['nstars'] = nstars
setup['integrator'] = integrator
setup['potential'] = potential
setup['pparams'] = pparams
self.setup = setup
self.setup_aux = {}
self.fill_intid()
self.fill_potid()
self.st_params = self.format_input()
def fill_intid(self):
"""Assign integrator ID for a given integrator choice
Assumes setup dictionary has an 'integrator' key"""
if self.setup['integrator']=='lf':
self.setup_aux['iaux'] = 0
elif self.setup['integrator']=='rk':
self.setup_aux['iaux'] = 1
def fill_potid(self):
"""Assign potential ID for a given potential choice
Assumes d has a 'potential' key"""
if self.setup['potential']=='nfw':
self.setup_aux['paux'] = 3
elif self.setup['potential']=='log':
self.setup_aux['paux'] = 2
elif self.setup['potential']=='point':
self.setup_aux['paux'] = 0
elif self.setup['potential']=='gal':
self.setup_aux['paux'] = 4
elif self.setup['potential']=='lmc':
self.setup_aux['paux'] = 6
elif self.setup['potential']=='dipole':
self.setup_aux['paux'] = 8
elif self.setup['potential']=='quad':
self.setup_aux['paux'] = 9
elif self.setup['potential']=='octu':
self.setup_aux['paux'] = 10
def format_input(self):
"""Format input parameters for streakline.stream"""
p = [None]*12
# progenitor position
p[0] = self.setup['x0'].si.value
p[1] = self.setup['v0'].si.value
# potential parameters
p[2] = [x.si.value for x in self.setup['pparams']]
# stream smoothing offsets
p[3] = [self.setup['dr'], self.setup['dv'].si.value]
# potential and integrator choice
p[4] = self.setup_aux['paux']
p[5] = self.setup_aux['iaux']
# number of steps and stream stars
p[6] = int(self.setup['age']/self.setup['dt'])
p[7] = int(p[6]/self.setup['nstars'])
# cluster properties
p[8] = self.setup['minit'].si.value
p[9] = self.setup['mfinal'].si.value
p[10] = self.setup['rcl'].si.value
# time step
p[11] = self.setup['dt'].si.value
return p
def generate(self):
"""Create streakline model for a stream of set parameters"""
#xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p)
stream = streakline.stream(*self.st_params)
self.leading = {}
self.leading['x'] = stream[:3]*u.m
self.leading['v'] = stream[6:9]*u.m/u.s
self.trailing = {}
self.trailing['x'] = stream[3:6]*u.m
self.trailing['v'] = stream[9:12]*u.m/u.s
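# Hedged usage sketch (not in the original source; assumes streakline is
# importable and that pparams matches the chosen potential's expected layout,
# e.g. a slice of pparams_fid for 'nfw' -- an assumption, check format_input):
#
#     s = Stream(x0=[10, 0, 0]*u.kpc, v0=[0, 150, 0]*u.km/u.s,
#                potential='nfw', pparams=pparams_fid[:11],
#                dt=1*u.Myr, age=2*u.Gyr, nstars=300)
#     s.generate()
#     s.observe(mode='equatorial', observer=mw_observer, vobs=vsun)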
def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None):
"""Observe the stream
stream.obs holds all observations
stream.err holds all errors"""
x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc
v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s
if mode=='cartesian':
# returns coordinates in following order
# x(x, y, z), v(vx, vy, vz)
if len(units)<2:
units.append(self.trailing['x'].unit)
units.append(self.trailing['v'].unit)
if len(errors)<2:
errors.append(0.2*u.kpc)
errors.append(2*u.km/u.s)
# positions
x = x.to(units[0])
ex = np.ones(np.shape(x))*errors[0]
ex = ex.to(units[0])
# velocities
v = v.to(units[1])
ev = np.ones(np.shape(v))*errors[1]
ev = ev.to(units[1])
self.obs = np.concatenate([x,v]).value
self.err = np.concatenate([ex,ev]).value
elif mode=='equatorial':
# assumes coordinates in the following order:
# ra, dec, distance, vrad, mualpha, mudelta
if len(units)!=6:
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
if len(errors)!=6:
errors = [0.2*u.deg, 0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr]
# define reference frame
xgal = coord.Galactocentric(x, **observer)
#frame = coord.Galactocentric(**observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, v, **vobs)
# store coordinates
ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
obs = np.hstack([ra, dec, dist, vr, mua, mud]).value
obs = np.reshape(obs,(6,-1))
if footprint=='sdss':
infoot = dec > -2.5*u.deg
obs = obs[:,infoot]
if rotmatrix is not None and not np.allclose(rotmatrix, np.eye(3)):
xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix)
obs[0] = xi
obs[1] = eta
self.obs = obs
# store errors
err = np.ones(np.shape(self.obs))
if logerr:
for i in range(6):
err[i] *= np.exp(errors[i].to(units[i]).value)
else:
for i in range(6):
err[i] *= errors[i].to(units[i]).value
self.err = err
self.obsunit = units
self.obserror = errors
# randomly select nstars from the stream
if nstars>-1:
if sequential:
select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int)
else:
select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars)
self.obs = self.obs[:,select]
self.err = self.err[:,select]
# include only designated dimensions
if len(present)>0:
self.obs = self.obs[present]
self.err = self.err[present]
self.obsunit = [ self.obsunit[x] for x in present ]
self.obserror = [ self.obserror[x] for x in present ]
def prog_orbit(self):
"""Generate progenitor orbital history"""
orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1)
self.orbit = {}
self.orbit['x'] = orbit[:3]*u.m
self.orbit['v'] = orbit[3:]*u.m/u.s
def project(self, name, N=1000, nbatch=-1):
"""Project the stream from observed to native coordinates"""
poly = np.loadtxt("../data/{0:s}_all.txt".format(name))
self.streak = np.poly1d(poly)
self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N)
self.streak_y = np.polyval(self.streak, self.streak_x)
self.streak_b = np.zeros(N)
self.streak_l = np.zeros(N)
pdot = np.polyder(poly)
for i in range(N):
length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,))
self.streak_l[i] = length[0]
XB = np.transpose(np.vstack([self.streak_x, self.streak_y]))
"""
Functions for evaluating model performance.
"""
# Import the necessary libraries
import os
import numpy as np
import torch
from numpy import dot
from numpy.linalg import norm
from PIL import Image
from skimage.metrics import peak_signal_noise_ratio
from skimage.metrics import structural_similarity as compare_ssim
# Import the necessary source codes
from Preprocessing.distort_images import distort_image
from Preprocessing.utils import ycbcr2rgb
# Initialise the default device
DEVICE = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
def metric_values(upsampled_images, metrics):
    """Apply each metric to every crop of every upsampled image and collect
    the results as a list of {metric name: [values]} dicts."""
metric_results = []
for i in range(len(upsampled_images)):
sub_metric_results = {}
for metric in metrics:
sub_metric_results[f"{metric.__name__}"] = []
for crop in upsampled_images[i][2]:
sub_metric_results[f"{metric.__name__}"].append(metric(upsampled_images[i][0], crop))
metric_results.append(sub_metric_results)
return metric_results
def cos(v1, v2):
    """Cosine similarity between two arrays (flattened to 1-D)."""
v1_1d = v1.ravel()
v2_1d = v2.ravel()
d = dot(v1_1d, v2_1d)
n = norm(v1_1d) * norm(v2_1d)
return d / n
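# Illustrative sketch (not from the original project): cos() flattens both
# inputs, so any matching shapes work; identical arrays score 1.0 and
# orthogonal arrays score 0.0.
def _cos_demo():
    a = np.array([[1.0, 0.0], [0.0, 0.0]])
    b = np.array([[0.0, 0.0], [0.0, 1.0]])
    assert np.isclose(cos(a, a), 1.0)
    assert np.isclose(cos(a, b), 0.0)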
def evaluate_model(path, model, pixel_mean, pixel_std, SR_FACTOR=3, sigma=1):
"""
Computes average Peak Signal to Noise Ratio (PSNR) and mean structural similarity
index (SSIM) over a set of target images and their super resolved versions.
Args:
path (string): relative path to directory containing images for evaluation
model (PyTorch model): the model to be evaluated
pixel_mean (float): mean luminance value to be used for standardization
pixel_std (float): std. dev. of luminance value to be used for standardization
SR_FACTOR (int): super resolution factor
sigma (int): the std. dev. to use for the gaussian blur
"""
    # Collect the evaluation images by extension (BMP, JPG, and PNG are supported)
    img_names = [im for im in os.listdir(path) if im[-4:] in {'.bmp', '.jpg', '.png'}]
# To store the error values
blurred_img_psnrs = []
out_img_psnrs = []
blurred_img_ssims = []
out_img_ssims = []
# Iterate through the images
for test_im in img_names:
# Generate the distorted image
blurred_test_im = distort_image(path=path + test_im, factor=SR_FACTOR, sigma=sigma)
imageFile = Image.open(path + test_im)
        # Convert the image from RGB to the YCbCr color space (float dtype),
        # which separates luminance from chrominance
im = np.array(imageFile.convert('YCbCr'))
        # Scale the luminance channel to the range [0, 1]
model_input = blurred_test_im[:, :, 0] / 255.0
        # Standardize using the pre-computed mean and standard deviation
model_input -= pixel_mean
model_input /= pixel_std
# Build a Tensor out of the images and set it to the default device
im_out_Y = model(torch.tensor(model_input,
dtype=torch.float).unsqueeze(0).unsqueeze(0).to(DEVICE))
im_out_Y = im_out_Y.detach().squeeze().squeeze().cpu().numpy().astype(np.float64)
im_out_viz = np.zeros((im_out_Y.shape[0], im_out_Y.shape[1], 3))
        # Un-standardize: multiply by the standard deviation and add back the mean
im_out_Y = (im_out_Y * pixel_std) + pixel_mean
# Un-normalize the images
im_out_Y *= 255.0
im_out_viz[:, :, 0] = im_out_Y
im_out_viz[:, :, 1] = im[:, :, 1]
im_out_viz[:, :, 2] = im[:, :, 2]
im_out_viz[:, :, 0] = np.around(im_out_viz[:, :, 0])
# Compute the Peak Signal to Noise Ratio
# Refer to the documentation of the function in the source file
blur_psnr = peak_signal_noise_ratio(ycbcr2rgb(im), ycbcr2rgb(blurred_test_im))
sr_psnr = peak_signal_noise_ratio(ycbcr2rgb(im), ycbcr2rgb(im_out_viz))
# Store the results
blurred_img_psnrs.append(blur_psnr)
out_img_psnrs.append(sr_psnr)
# Compute the Structural Similarity Index
# Refer to the documentation of the function in the source file
blur_ssim = compare_ssim(ycbcr2rgb(im), ycbcr2rgb(blurred_test_im), multichannel=True)
sr_ssim = compare_ssim(ycbcr2rgb(im), ycbcr2rgb(im_out_viz), multichannel=True)
# Store the results
blurred_img_ssims.append(blur_ssim)
out_img_ssims.append(sr_ssim)
# Compute the mean of the results obtained
mean_blur_psnr = np.mean(np.array(blurred_img_psnrs))
mean_sr_psnr = np.mean( | np.array(out_img_psnrs) | numpy.array |
'''
More factorization code, courtesy of <NAME>.
'''
import numpy as np
import dataclasses
def moments(muhat_row,Sighat_row,muhat_col,Sighat_col,**kwargs):
row_m2 = Sighat_row + np.einsum('ij,ik->ijk',muhat_row,muhat_row)
col_m2 = Sighat_col + np.einsum('ij,ik->ijk',muhat_col,muhat_col)
mn= muhat_row @ muhat_col.T
e2 = np.einsum('ajk,bjk -> ab',row_m2,col_m2)
vr = e2-mn**2
return row_m2,col_m2,mn,vr
def prior_KL(muhat,Sighat,mu,Sig):
df = muhat - mu[None,:]
m2 = Sighat + np.einsum('ij,ik->ijk',df,df)
mahaltr = np.sum(np.linalg.inv(Sig)[None,:,:]*m2)
nobs = np.prod(muhat.shape)
sigdet_prior = muhat.shape[0]* | np.linalg.slogdet(Sig) | numpy.linalg.slogdet |
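# Illustrative sketch (not from the original project): the closed form that
# prior_KL assembles term by term, for reference:
#   KL(N(m1,S1) || N(m0,S0)) = 0.5*(tr(S0^-1 S1) + (m0-m1)^T S0^-1 (m0-m1)
#                                   - k + log|S0| - log|S1|)
def _gauss_kl(m1, S1, m0, S0):
    k = m1.shape[0]
    S0inv = np.linalg.inv(S0)
    diff = m0 - m1
    _, logdet0 = np.linalg.slogdet(S0)
    _, logdet1 = np.linalg.slogdet(S1)
    return 0.5*(np.trace(S0inv @ S1) + diff @ S0inv @ diff - k + logdet0 - logdet1)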
from __future__ import print_function, absolute_import, division
import sys
import zmq
import numpy as np
if sys.version_info > (3,):
buffer = memoryview
def recvMatrix(socket):
"""
Receive a numpy array over zmq
:param socket: (zmq socket)
:return: (Numpy matrix)
"""
metadata = socket.recv_json()
msg = socket.recv(copy=True, track=False)
A = np.frombuffer(buffer(msg), dtype=metadata['dtype'])
return A.reshape(metadata['shape'])
def sendMatrix(socket, mat):
"""
Send a numpy mat with metadata over zmq
    :param socket: (zmq socket)
:param mat: (numpy matrix)
"""
metadata = dict(
dtype=str(mat.dtype),
shape=mat.shape,
)
# SNDMORE flag specifies this is a multi-part message
socket.send_json(metadata, flags=zmq.SNDMORE)
return socket.send(mat, flags=0, copy=True, track=False)
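# Illustrative sketch (not from the original project): a REQ/REP round trip
# using the two helpers above. The address and port are placeholders.
def _roundtrip_demo():
    ctx = zmq.Context()
    rep = ctx.socket(zmq.REP)
    rep.bind("tcp://127.0.0.1:5555")
    req = ctx.socket(zmq.REQ)
    req.connect("tcp://127.0.0.1:5555")
    sent = np.arange(6, dtype=np.float64).reshape(2, 3)
    sendMatrix(req, sent)                 # json header frame + raw data frame
    received = recvMatrix(rep)
    assert np.array_equal(sent, received)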
def getActions(delta_pos, n_actions):
"""
Get list of possible actions
:param delta_pos: (float)
:param n_actions: (int)
:return: (numpy matrix)
"""
possible_deltas = [i * delta_pos for i in range(-1, 2)]
actions = []
for dx in possible_deltas:
for dy in possible_deltas:
for dz in possible_deltas:
if dx == 0 and dy == 0 and dz == 0:
continue
# Allow only move in one direction
if abs(dx) + abs(dy) + abs(dz) > delta_pos:
continue
actions.append([dx, dy, dz])
assert len(actions) == n_actions, "Wrong number of actions: {}".format(len(actions))
return | np.array(actions) | numpy.array |
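# Illustrative sketch (not from the original project): with the
# one-direction-at-a-time filter above, only the six axis-aligned unit moves
# survive, so n_actions must be 6.
def _actions_demo():
    actions = getActions(delta_pos=0.05, n_actions=6)
    assert actions.shape == (6, 3)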
import pandas as pd
import numpy as np
import cv2, utils
import random
import copy
import threading
import itertools
import numpy.random as npr
pos_max_overlaps = 0.7
neg_min_overlaps = 0.3
batchsize = 256
fraction = 0.5
RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
RPN_POSITIVE_WEIGHT = -1.0
img_channel_mean = [103.939, 116.779, 123.68]
img_scaling_factor = 1.0
def _generate_all_bbox(feat_h, feat_w, feat_stride = 16, num_anchors = 9):
# Create lattice (base points to shift anchors)
shift_x = np.arange(0, feat_w) * feat_stride
shift_y = np.arange(0, feat_h) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),shift_x.ravel(), shift_y.ravel())).transpose()
# Create all bbox
A = num_anchors
K = len(shifts) # number of base points = feat_h * feat_w
anchors = utils.anchor()
bbox = anchors.reshape(1, A, 4) + shifts.reshape(K, 1, 4)
bbox = bbox.reshape(K * A, 4)
return bbox
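# Illustrative sketch (not from the original project), assuming utils.anchor()
# returns the usual (9, 4) base anchor set: on a 2x3 feature map the base
# anchors are tiled over K = 2*3 = 6 lattice points, giving K*A = 54 boxes.
def _bbox_demo():
    boxes = _generate_all_bbox(feat_h=2, feat_w=3)
    assert boxes.shape == (2 * 3 * 9, 4)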
def get_img_by_name(df,ind,size=(640,300)):
file_name = df['File_Path'][ind]
#print(file_name)
img = cv2.imread(file_name)
img_size = np.shape(img)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = cv2.resize(img,size)
name_str = file_name.split('/')
name_str = name_str[-1]
#print(name_str)
#print(file_name)
bb_boxes = df[df['Frame'] == name_str].reset_index()
img_size_post = np.shape(img)
#TODO,(add data augment support)
bb_boxes['xmin'] = np.round(bb_boxes['xmin']/img_size[1]*img_size_post[1])
bb_boxes['xmax'] = np.round(bb_boxes['xmax']/img_size[1]*img_size_post[1])
bb_boxes['ymin'] = np.round(bb_boxes['ymin']/img_size[0]*img_size_post[0])
bb_boxes['ymax'] = np.round(bb_boxes['ymax']/img_size[0]*img_size_post[0])
bb_boxes['Area'] = (bb_boxes['xmax']- bb_boxes['xmin'])*(bb_boxes['ymax']- bb_boxes['ymin'])
#bb_boxes = bb_boxes[bb_boxes['Area']>400]
return name_str,img,bb_boxes
def batch_generate(data, batch_size = 1):
while 1:
for i_batch in range(batch_size):
i_line = np.random.randint(len(data))
name_str, img, bb_boxes = get_img_by_name(data, i_line, size = (960, 640))
#print(name_str)
#TODO
            #create anchor-based ground truth here (target bbox (1,40,60,9) & target regression (1,40,60,36))
gta = np.zeros((len(bb_boxes), 4))
#print(gta.shape)
#bbox groundtruth before bbox_encode
for i in range(len(bb_boxes)):
gta[i, 0] = int(bb_boxes.iloc[i]['xmin'])
gta[i, 1] = int(bb_boxes.iloc[i]['ymin'])
gta[i, 2] = int(bb_boxes.iloc[i]['xmax'])
gta[i, 3] = int(bb_boxes.iloc[i]['ymax'])
#print(gta)
x_img = img.astype(np.float32)
x_img[:, :, 0] -= img_channel_mean[0]
x_img[:, :, 1] -= img_channel_mean[1]
x_img[:, :, 2] -= img_channel_mean[2]
x_img /= img_scaling_factor
x_img = np.expand_dims(x_img, axis=0)
            #label generation (regression target & positive/negative samples)
y_rpn_cls,y_rpn_regr = label_generate(img, gta)
#y_rpn_cls,y_rpn_regr = utils.calc_rpn(bb_boxes, gta)
#print(y_rpn_cls)
#print(y_rpn_regr.shape)
yield x_img, [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], gta
#TODO
#fast version (in progress)
def label_generate(img, gta):
    #init base matrix
(output_width, output_height) = (60, 40)
num_anchors = 9
#40,60,9
#40,60,9,4
y_rpn_overlap = np.zeros((output_height, output_width, num_anchors))
y_is_box_valid = np.zeros((output_height, output_width, num_anchors))
y_rpn_regr = np.zeros((output_height * output_width * num_anchors , 4))
#anchor box generate(generate anchors in each shifts box)
anchor_box = _generate_all_bbox(output_width, output_height)
total_anchors = anchor_box.shape[0]
#print('the shape of anchor_box', np.asarray(anchor_box).shape)
#print('the total number os anchors',total_anchors)
#Only inside anchors are valid
_allowed_border = 0
im_info = img.shape[:2]
inds_inside = np.where(
(anchor_box[:, 0] >= -_allowed_border) &
(anchor_box[:, 1] >= -_allowed_border) &
(anchor_box[:, 2] < im_info[1] + _allowed_border) & # width
(anchor_box[:, 3] < im_info[0] + _allowed_border) # height
)[0]
#print('inside anchor index',inds_inside)
#print('number of valid anchors',len(inds_inside))
valid_anchors = anchor_box[inds_inside, :]
#print('valid_anchors display',valid_anchors)
#print('shape of valid_anchors',np.asarray(valid_anchors).shape)
y_rpn_regr[inds_inside] = anchor_box[inds_inside, :]
#print('rpn overlap display', y_rpn_regr)
#print('shape of rpn overlap',np.asarray(y_rpn_regr).shape)
#print('rpn overlap[inds_inside] display', y_rpn_regr[inds_inside])
#print('shape of inds_inside rpn overlaps',np.asarray(y_rpn_regr[inds_inside]).shape)
#calculate iou(overlaps)
#print('y_rpn_overlap')
overlaps = utils.bbox_overlaps(np.ascontiguousarray(y_rpn_regr, dtype=np.float),np.ascontiguousarray(gta, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = np.zeros((output_height * output_width * num_anchors))
max_overlaps[inds_inside] = overlaps[np.arange(len(inds_inside)), argmax_overlaps[inds_inside]]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
#print('overlaps display',overlaps)
#print('shape of overlaps', np.asarray(overlaps).shape)
#print('argmax_overlaps', argmax_overlaps)
#print('shape of argmax_overlaps',argmax_overlaps.shape)
#print('max overlaps display', max_overlaps)
#print('total number of max overlaps', len(max_overlaps))
#print('shape of max overlaps', max_overlaps.shape)
#print('gt_max_overlaps display', gt_max_overlaps)
#print('total number of gt_max_overlaps', len(gt_max_overlaps))
#print('gt_argmax_overlaps', gt_argmax_overlaps)
#print('number of gt_argmax_overlaps', len(gt_argmax_overlaps))
#y_rpn_overlap, y_is_box_valid
y_rpn_overlap = y_rpn_overlap.reshape(output_height * output_width * num_anchors)
y_is_box_valid = y_is_box_valid.reshape(output_height * output_width * num_anchors)
#negative
#print('shape of y_rpn_overlap', y_rpn_overlap.shape)
#print('shape of y_is_box_valid',y_is_box_valid.shape)
y_rpn_overlap[max_overlaps < neg_min_overlaps] = 0
y_is_box_valid[inds_inside] = 1
    #y_is_box_valid[max_overlaps < neg_min_overlaps] = 1 # not a good way to mark every box valid, because boxes outside the image would be included too
#neutral
#np.logical_and
y_rpn_overlap[np.logical_and(neg_min_overlaps < max_overlaps, max_overlaps < pos_max_overlaps)] = 0
y_is_box_valid[np.logical_and(neg_min_overlaps < max_overlaps, max_overlaps < pos_max_overlaps)] = 0
#y_rpn_overlap[neg_min_overlaps < max_overlaps and max_overlaps < pos_max_overlaps] = 0
#y_is_box_valid[neg_min_overlaps < max_overlaps and max_overlaps < pos_max_overlaps] = 0
#positive
y_rpn_overlap[gt_argmax_overlaps] = 1
y_is_box_valid[gt_argmax_overlaps] = 1
y_rpn_overlap[max_overlaps >= pos_max_overlaps] = 1
y_is_box_valid[max_overlaps >= pos_max_overlaps] = 1
# subsample positive labels if we have too many
num_fg = int(fraction * batchsize)
#print('balanced fg',num_fg)
disable_inds = []
fg_inds = np.where( | np.logical_and(y_rpn_overlap == 1, y_is_box_valid == 1) | numpy.logical_and |
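# Illustrative sketch (not from the original project): a minimal NxM IoU in
# the spirit of utils.bbox_overlaps used above; boxes are [x1, y1, x2, y2]
# (the project version may use a +1 pixel convention).
def _iou(boxes, gt):
    x1 = np.maximum(boxes[:, None, 0], gt[None, :, 0])
    y1 = np.maximum(boxes[:, None, 1], gt[None, :, 1])
    x2 = np.minimum(boxes[:, None, 2], gt[None, :, 2])
    y2 = np.minimum(boxes[:, None, 3], gt[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_a = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    area_b = (gt[:, 2] - gt[:, 0]) * (gt[:, 3] - gt[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)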
import numpy as np
from astropy.io import fits
from astropy.table import Table, vstack
from astropy.wcs import WCS
import os
import argparse
import logging, traceback
import time
import pandas as pd
from copy import copy, deepcopy
from config import solid_angle_dpi_fname, rt_dir
from sqlite_funcs import get_conn, make_timeIDs
from wcs_funcs import world2val
from event2dpi_funcs import det2dpis, mask_detxy
from models import Bkg_Model_wFlatA, CompoundModel,\
im_dist, Point_Source_Model_Binned_Rates, Source_Model_InOutFoV
from flux_models import Plaw_Flux, Cutoff_Plaw_Flux
from gti_funcs import add_bti2gti, bti2gti, gti2bti, union_gtis
from ray_trace_funcs import RayTraces
from coord_conv_funcs import convert_radec2imxy, convert_imxy2radec, imxy2theta_phi, theta_phi2imxy
from LLH import LLH_webins
from minimizers import NLLH_ScipyMinimize_Wjacob, NLLH_ScipyMinimize
from do_llh_inFoV4realtime2 import do_scan_around_peak, find_peaks2scan, parse_bkg_csv
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('--evfname', type=str,\
help="Event data file",
default='filter_evdata.fits')
parser.add_argument('--dmask', type=str,\
help="Detmask fname",
default='detmask.fits')
parser.add_argument('--job_id', type=int,\
help="ID to tell it what seeds to do",\
default=-1)
parser.add_argument('--Njobs', type=int,\
help="Total number of jobs submitted",\
default=64)
parser.add_argument('--Ntrials', type=int,\
help="Number of trials to run",\
default=8)
parser.add_argument('--Afact', type=float,\
help="A factor to use",\
default=1.0)
parser.add_argument('--theta', type=float,\
help="Theta to sim at",\
default=0.0)
parser.add_argument('--phi', type=float,\
help="phi to sim at",\
default=0.0)
parser.add_argument('--trig_time', type=float,\
help="trigger time to center sims at",\
default=0.0)
parser.add_argument('--dbfname', type=str,\
help="Name to save the database to",\
default=None)
parser.add_argument('--pcfname', type=str,\
help="partial coding file name",\
default='pc_2.img')
parser.add_argument('--bkg_fname', type=str,\
help="Name of the file with the bkg fits",\
default='bkg_estimation.csv')
parser.add_argument('--log_fname', type=str,\
help="Name for the log file",\
default='sim_and_min')
args = parser.parse_args()
return args
def mk_sim_evdata(sim_mod, sim_params, dur, tstart):
    # First make simulated DPIs, then create an event for each count.
    # Each event gets its DETX, DETY, and the energy at the middle of its ebin;
    # times are drawn uniformly between tstart and tstop, and the events are
    # sorted before the Table is returned.
col_names = ['TIME', 'DET_ID', 'EVENT_FLAGS', 'PHA', 'DETX', 'DETY',\
'PI', 'ENERGY']
# only need to assign, TIME, DETX, and DETY
# make all flags 0, and the rest don't matter
tab = Table(names=['DETX', 'DETY', 'ENERGY'], dtype=(np.int, np.int, np.float))
ebins0 = sim_mod.ebins0
ebins1 = sim_mod.ebins1
rate_dpis = sim_mod.get_rate_dpis(sim_params)
sim_dpis = np.random.poisson(lam=(rate_dpis*dur))
for ebin, sim_dpi in enumerate(sim_dpis):
simdpi = np.zeros_like(sim_mod.bl_dmask, dtype=np.int)
simdpi[sim_mod.bl_dmask] = sim_dpi
detys, detxs = np.where(simdpi>0)
emid = (ebins1[ebin]+ebins0[ebin])/2.
for jj in range(len(detys)):
dety = detys[jj]
detx = detxs[jj]
for ii in range(simdpi[dety,detx]):
row = (detx, dety, emid)
tab.add_row(row)
tab['TIME'] = dur*np.random.random(size=len(tab)) + tstart
tab['DET_ID'] = np.zeros(len(tab), dtype=np.int)
tab['PHA'] = np.ones(len(tab), dtype=np.int)
tab['EVENT_FLAGS'] = np.zeros(len(tab), dtype=np.int)
tab['PI'] = np.rint(tab['ENERGY']*10).astype(np.int)
tab.sort(keys='TIME')
return tab
def analysis_for_imxy_square(imx0, imx1, imy0, imy1, bkg_bf_params_list,\
bkg_mod, sig_mod, ev_data,\
ebins0, ebins1, tbins0, tbins1,\
timeIDs, TS2keep=4.5,\
max_frac2keep=0.75, minTS2scan=6.0):
bl_dmask = bkg_mod.bl_dmask
# dimxy = 0.0025
dimxy = np.round(imx1 - imx0, decimals=4)
imstep = 0.003
imxstep = 0.004
# imx_ax = np.arange(imx0, imx1+dimxy/2., dimxy)
# imy_ax = np.arange(imy0, imy1+dimxy/2., dimxy)
# imxg,imyg = np.meshgrid(imx_ax, imy_ax)
# imx_ax = np.arange(imx0, imx1, imxstep)
# imy_ax = np.arange(imy0, imy1, imstep)
imx_ax = np.arange(0, dimxy, imxstep)
imy_ax = np.arange(0, dimxy, imstep)
imxg,imyg = np.meshgrid(imx_ax, imy_ax)
bl = np.isclose((imyg*1e4).astype(np.int)%int(imstep*2*1e4),0)
imxg[bl] += imxstep/2.
imxs = np.ravel(imxg) + imx0
imys = np.ravel(imyg) + imy0
Npnts = len(imxs)
print(Npnts)
logging.info("%d imxy points to do" %(Npnts))
thetas, phis = imxy2theta_phi(imxs, imys)
gamma_ax = np.linspace(-0.4, 1.6, 8+1)
gamma_ax = np.linspace(-0.4, 1.6, 4+1)[1:-1]
# gamma_ax = np.array([0.4, 0.9])
# gamma_ax = np.linspace(-0.4, 1.6, 3+1)
Epeak_ax = np.logspace(np.log10(45.0), 3, 10+1)
Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[1:-1]
Epeak_ax = np.logspace(np.log10(45.0), 3, 4+1)[1:-1]
# Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[3:]
logging.info("Epeak_ax: ")
logging.info(Epeak_ax)
logging.info("gammas_ax: ")
logging.info(gamma_ax)
# Epeak_ax = np.logspace(np.log10(25.0), 3, 3+1)
gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
gammas = gammas.ravel()
Epeaks = Epeaks.ravel()
Nspec_pnts = len(Epeaks)
ntbins = len(tbins0)
# rt_obj = RayTraces(rt_dir)
# fp_obj = FootPrints(fp_dir)
# sig_mod = Source_Model_InOutFoV(flux_mod, [ebins0,ebins1], bl_dmask,\
# rt_obj, use_deriv=True)
# sig_mod.set_theta_phi(np.mean(thetas), np.mean(phis))
comp_mod = CompoundModel([bkg_mod, sig_mod])
sig_miner = NLLH_ScipyMinimize_Wjacob('')
tmin = np.min(tbins0)
tmax = np.max(tbins1)
if (tmax - tmin) > 40.0:
logging.debug("tmax - tmin > 40.0s, using twinds for tbl")
gti_dict = {'START':tbins0,'STOP':tbins1}
gti_twinds = Table(data=gti_dict)
gtis = union_gtis([gti_twinds])
tbl = mk_gti_bl(ev_data['TIME'], gtis, time_pad=0.1)
logging.debug("np.sum(tbl): %d"%(np.sum(tbl)))
else:
tbl = (ev_data['TIME']>=(tmin-1.0))&(ev_data['TIME']<(tmax+1.0))
logging.debug("np.sum(tbl): %d"%(np.sum(tbl)))
sig_llh_obj = LLH_webins(ev_data[tbl], ebins0, ebins1, bl_dmask, has_err=True)
sig_llh_obj.set_model(comp_mod)
flux_params = {'A':1.0, 'gamma':0.5, 'Epeak':1e2}
bkg_name = bkg_mod.name
pars_ = {}
pars_['Signal_theta'] = np.mean(thetas)
pars_['Signal_phi'] = np.mean(phis)
for pname,val in bkg_bf_params_list[0].items():
# pars_['Background_'+pname] = val
pars_[bkg_name+'_'+pname] = val
for pname,val in flux_params.items():
pars_['Signal_'+pname] = val
sig_miner.set_llh(sig_llh_obj)
fixed_pnames = list(pars_.keys())
fixed_vals = list(pars_.values())
trans = [None for i in range(len(fixed_pnames))]
sig_miner.set_trans(fixed_pnames, trans)
sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
sig_miner.set_fixed_params(['Signal_A'], fixed=False)
res_dfs_ = []
for ii in range(Npnts):
print(imxs[ii], imys[ii])
print(thetas[ii], phis[ii])
sig_miner.set_fixed_params(['Signal_theta', 'Signal_phi'],\
values=[thetas[ii],phis[ii]])
res_dfs = []
for j in range(Nspec_pnts):
flux_params['gamma'] = gammas[j]
flux_params['Epeak'] = Epeaks[j]
sig_mod.set_flux_params(flux_params)
res_dict = {}
res_dict['Epeak'] = Epeaks[j]
res_dict['gamma'] = gammas[j]
nllhs = np.zeros(ntbins)
As = np.zeros(ntbins)
for i in range(ntbins):
parss_ = {}
for pname,val in bkg_bf_params_list[i].items():
# pars_['Background_'+pname] = val
parss_[bkg_name+'_'+pname] = val
sig_miner.set_fixed_params(list(parss_.keys()), values=list(parss_.values()))
t0 = tbins0[i]
t1 = tbins1[i]
dt = t1 - t0
sig_llh_obj.set_time(tbins0[i], tbins1[i])
try:
pars, nllh, res = sig_miner.minimize()
As[i] = pars[0][0]
nllhs[i] = nllh[0]
except Exception as E:
logging.error(E)
logging.error(traceback.format_exc())
logging.error("Failed to minimize seed: ")
logging.error((imxs[ii],imys[ii]))
logging.error((timeIDs[i]))
nllhs[i] = np.nan
# print "res: "
# print res
res_dict['nllh'] = nllhs
res_dict['A'] = As
res_dict['time'] = np.array(tbins0)
res_dict['dur'] = np.array(tbins1)-np.array(tbins0)
res_dict['timeID'] = np.array(timeIDs)
res_dict['theta'] = thetas[ii]
res_dict['phi'] = phis[ii]
res_dict['imx'] = imxs[ii]
res_dict['imy'] = imys[ii]
res_dfs.append(pd.DataFrame(res_dict))
# logging.info("Done with spec %d of %d" %(j+1,Nspec_pnts))
res_df = pd.concat(res_dfs, ignore_index=True)
bkg_nllhs = np.zeros(len(res_df))
bkg_bf_param_dict = {}
for i in range(ntbins):
t0 = tbins0[i]
t1 = tbins1[i]
dt = t1 - t0
sig_llh_obj.set_time(tbins0[i], tbins1[i])
for pname,val in bkg_bf_params_list[i].items():
pars_[bkg_name+'_'+pname] = val
bkg_bf_param_dict[timeIDs[i]] = bkg_bf_params_list[i]
pars_['Signal_theta'] = thetas[ii]
pars_['Signal_phi'] = phis[ii]
pars_['Signal_A'] = 1e-10
bkg_nllh = -sig_llh_obj.get_logprob(pars_)
bl = np.isclose(res_df['time']-t0,t0-t0)&np.isclose(res_df['dur'],dt)
bkg_nllhs[bl] = bkg_nllh
# pars_['Signal_A'] = 1e-10
# bkg_nllh = -sig_llh_obj.get_logprob(pars_)
res_df['bkg_nllh'] = bkg_nllhs
res_df['TS'] = np.sqrt(2.*(bkg_nllhs - res_df['nllh']))
res_df['TS'][np.isnan(res_df['TS'])] = 0.0
res_dfs_.append(res_df)
logging.info("Done with imxy %d of %d" %(ii+1,Npnts))
res_df = pd.concat(res_dfs_, ignore_index=True)
if TS2keep is not None:
TSbl = (res_df['TS']>=TS2keep)
if np.sum(TSbl) > (len(res_df)/5.0):
TSwrite_ = np.nanpercentile(res_df['TS'], max_frac2keep*100.0)
TSbl = (res_df['TS']>=TSwrite_)
elif np.sum(TSbl) < 1:
TSbl = np.isclose(res_df['TS'],np.max(res_df['TS']))
else:
TSbl = (res_df['TS']>=TS2keep)
res_df = res_df[TSbl]
# minTS2scan = 6.0
if np.max(res_df['TS']) >= minTS2scan:
peaks_df = find_peaks2scan(res_df, minTS=minTS2scan)
Npeaks2scan = len(peaks_df)
else:
Npeaks2scan = 0
logging.info("%d peaks to scan"%(Npeaks2scan))
print(list(bkg_bf_param_dict.keys()))
if Npeaks2scan > 0:
peak_res_dfs = []
for peak_ind, peak_row in peaks_df.iterrows():
bkg_bf_params = bkg_bf_param_dict[str(int(peak_row['timeID']))]
logging.info("Starting to scan peak_row")
logging.info(peak_row)
df = do_scan_around_peak(peak_row, bkg_bf_params, bkg_name,\
sig_miner, sig_llh_obj, sig_mod)
max_peak_row = df.loc[df['TS'].idxmax()]
df2 = do_scan_around_peak(max_peak_row, bkg_bf_params, bkg_name,\
sig_miner, sig_llh_obj, sig_mod,\
imstep=1e-3, dimx=1e-3, dimy=1e-3,\
dgamma=0.1, dlog10Ep=0.1)
peak_res_dfs.append(df)
peak_res_dfs.append(df2)
peak_res_df = pd.concat(peak_res_dfs, ignore_index=True)
return res_df, peak_res_df
else:
return res_df, None
def min_sim_llhs(ev_data, sim_evdata, sim_params, sim_tstart, sim_tstop,\
bkg_fname, solid_ang_dpi, bl_dmask, rt_dir, sig_mod,\
ebins0, ebins1, durs=[0.256, 0.512, 1.024]):
trig_time = sim_tstart
imx_sim, imy_sim = sim_params['imx'], sim_params['imy']
imx_cent = np.round(imx_sim + 2e-3*(np.random.random() - 0.5)*2, decimals=3)
imy_cent = np.round(imy_sim + 2e-3*(np.random.random() - 0.5)*2, decimals=3)
dimxy = 0.016
imx0 = imx_cent - dimxy/2.
imx1 = imx_cent + dimxy/2.
imy0 = imy_cent - dimxy/2.
imy1 = imy_cent + dimxy/2.
evdata_ = ev_data.copy()
print((type(evdata_)))
print((type(sim_evdata)))
evdata = vstack([evdata_, sim_evdata])
evdata.sort('TIME')
bkg_df = pd.read_csv(bkg_fname)
bkg_params_list = []
bkg_df, bkg_name, PSnames, bkg_mod, ps_mods =\
parse_bkg_csv(bkg_fname, solid_ang_dpi,\
ebins0, ebins1, bl_dmask, rt_dir)
bkg_mod.has_deriv = False
bkg_mod_list = [bkg_mod]
Nsrcs = len(ps_mods)
if Nsrcs > 0:
bkg_mod_list += ps_mods
for ps_mod in ps_mods:
ps_mod.has_deriv = False
bkg_mod = CompoundModel(bkg_mod_list)
tbins0_ = []
tbins1_ = []
for dur in durs:
tstep = dur/4.0
tbins0 = np.arange(np.round(np.min(evdata['TIME'])),\
np.max(evdata['TIME']),tstep)
tbins1 = tbins0 + dur
bl = (tbins0<(sim_tstop-(tstep)))&(tbins1>(sim_tstart+(tstep)))
tbins0 = tbins0[bl]
tbins1 = tbins1[bl]
ntbins = np.sum(bl)
print((ntbins, " tbins to do for dur",dur))
for i in range(ntbins):
res_dict = {}
t0 = tbins0[i]
t1 = tbins1[i]
tmid = (t0 + t1)/2.
tbins0_.append(t0)
tbins1_.append(t1)
bkg_row = bkg_df.iloc[np.argmin(np.abs(tmid - bkg_df['time']))]
bkg_params = {pname:bkg_row[pname] for pname in\
bkg_mod.param_names}
bkg_params_list.append(bkg_params)
tbins0 = np.array(tbins0_)
tbins1 = | np.array(tbins1_) | numpy.array |
'''
Layers for NN-models (forward+backward pass).
Written by <NAME> (https://github.com/SLotAbr).
BSD License
'''
# from multiprocessing import Process
import numpy as np
import pickle
from tools.functions import string_softmax
from tools.optimizers import AdaM as AdaM
class token_embedding:
def __init__(self, vocabulary_size, d_model, context_size, optim_param):
self.TE_table = np.random.randn(vocabulary_size, d_model) * 1e-3
self.vocabulary_size = vocabulary_size
self.d_model = d_model
self.context_size = context_size
self.input_field = 0
self.optim = AdaM(optim_param)
def __call__(self, index_list):
        # form the X matrix from token indexes
        # we keep a 2D array for the concatenation below
self.input_indexes = index_list
context =[[self.TE_table[j] for j in index_list]]
return np.concatenate(context, axis=1)
def update_weights(self, dX, dTE_linear):
        # dTE_linear - the second part of the TE derivative
        # the TE derivative has two parts, so this one is supplied by the caller
dTE = np.zeros((self.vocabulary_size, self.d_model))
for i in range(self.context_size):
dTE[self.input_indexes[i]]+= dX[i]
dTE += dTE_linear
self.TE_table = self.optim.weights_update(self.TE_table, dTE)
def linear(self, x):
'''
        use the token embedding table as a linear layer with bias=0 (weight tying);
        this produces the output token probabilities (logits)
:x.shape = [context_size; d_model]
:output.shape = [context_size; vocabulary_size]
'''
self.input_field = x
return [email protected]_table.T
def linear_backward(self, dl):
# returns derivatives for input signal and TE_table
return [email protected]_table, (self.input_field.T@dl).T
def save_weights(self, path):
with open(path, 'wb') as f:
pickle.dump([self.TE_table, self.optim], f)
def restore_weights(self, path):
with open(path, 'rb') as f:
self.TE_table, self.optim = pickle.load(f)
class linear:
def __init__(self, hidden_units, number_of_neurons, optim_param):
        # weights drawn from N(0, 1), then scaled by 1e-3
self.W = np.random.randn(hidden_units, number_of_neurons) * 1e-3
self.b = np.zeros(number_of_neurons)
self.input_field = 0 # Memory for backpropagation
self.w_optim = AdaM(optim_param)
self.b_optim = AdaM(optim_param)
def __call__(self, x):
self.input_field = x
return (x @ self.W + self.b) #np.dot(x, w) + b
def backward(self, dl):
dw = self.input_field.T @ dl
db = dl.sum(axis=0)
# Updating weights
self.W = self.w_optim.weights_update(self.W, dw)
self.b = self.b_optim.weights_update(self.b, db)
# returns dl for previous layers
return dl @ self.W.T
def save_weights(self, path):
with open(path, 'wb') as f:
pickle.dump([self.W,
self.b,
self.w_optim,
self.b_optim], f)
def restore_weights(self, path):
with open(path, 'rb') as f:
self.W, self.b, self.w_optim, self.b_optim = pickle.load(f)
class ReLU:
def __call__(self, x):
result = np.maximum(0, x)
self.mask = result>0
return result
def backward(self, dl):
return dl * self.mask
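# Illustrative sketch (not from the original project): the backward pass just
# gates the incoming gradient with the forward activation mask.
def _relu_demo():
    relu = ReLU()
    out = relu(np.array([[-1.0, 2.0], [3.0, -4.0]]))
    grad = relu.backward(np.ones((2, 2)))
    assert np.array_equal(out, [[0.0, 2.0], [3.0, 0.0]])
    assert np.array_equal(grad, [[0.0, 1.0], [1.0, 0.0]])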
class LayerNormalization:
def __init__(self, context_size):
self.context_size = context_size
def __call__(self, x, phase='train'):
'''
        I'll delete the if-else construction and replace it with a more
        efficient version for the evaluation phase later.
        The same construction appears in MH_attention_mechanism (__call__ field).
'''
if phase == 'train':
context_size = self.context_size
else:
context_size = x.shape[0]
x_mean = (x.mean(axis=1).reshape(1,context_size)).T
self.x_var = (x.var(axis=1).reshape(1,context_size)).T
return (x-x_mean)/np.sqrt(self.x_var+1e-12)
def backward(self, dl):
l_mean = (dl.mean(axis=1).reshape(1,self.context_size)).T
return (dl - l_mean)/np.sqrt(self.x_var+1e-12)
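# Illustrative sketch (not from the original project): after normalization
# every row has (approximately) zero mean and unit variance.
def _layernorm_demo():
    ln = LayerNormalization(context_size=4)
    y = ln(np.random.randn(4, 8) * 5 + 3)
    assert np.allclose(y.mean(axis=1), 0, atol=1e-6)
    assert np.allclose(y.var(axis=1), 1, atol=1e-4)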
class MH_attention_mechanism:
def __init__(self, context_size, d_model, H):
self.d_k = 1/np.sqrt(d_model/H)
self.context_size = context_size
self.H = H
# matrix with 'True' values above the main diagonal
# We'll use it for replacing elements in dot product of Q and K
self.mask=(np.tril(np.ones((context_size, context_size)))==0)
self.backward_mask=np.tril(np.ones((context_size, context_size)))
def __call__(self, x, phase='train'):
self.Q, self.K, self.V = np.split(x, 3, axis=1)
self.Q = np.split(self.Q, self.H, axis=1)
self.K = np.split(self.K, self.H, axis=1)
self.V = np.split(self.V, self.H, axis=1)
        # When we generate text ('eval' phase), context_size is always different
if phase == 'train':
context_size = self.context_size
else:
context_size = x.shape[0]
# Replace it by pre-init fields for faster implementation?
C = [0 for h in range(self.H)]
self.S = [0 for h in range(self.H)]
Z = [0 for h in range(self.H)]
# https://docs.python.org/3/library/multiprocessing.html
for h in range(self.H):
# Attention formula
C[h] = self.Q[h] @ self.K[h].T * self.d_k
if phase == 'train':
C[h][self.mask]=-1e12
else:
# We've got different context_size during evaluation
mask = (np.tril(np.ones((context_size, context_size)))==0)
C[h][mask]=-1e12
self.S[h] = string_softmax(C[h], context_size)
# print('softmax\'s state:\n', self.S[h])
Z[h] = self.S[h]@self.V[h]
# print('Z\'s state:\n', Z[h])
return np.concatenate(Z, axis=1)
def backward(self, dl):
dZ = np.split(dl, self.H, axis=1)
dQ = [0 for h in range(self.H)]
dK = [0 for h in range(self.H)]
dV = [0 for h in range(self.H)]
for h in range(self.H):
dV[h] = self.S[h].T @ dZ[h]
dZ[h] = dZ[h]@self.V[h].T
            # We would normally apply this mask later in the chain rule,
            # but applying it here is equivalent
dZ[h] = dZ[h] * self.backward_mask
dZ[h] = dZ[h]@ (self.S[h]*(1-self.S[h]))
dK[h] = (self.Q[h].T@dZ[h] * self.d_k).T
dQ[h] = dZ[h]@self.K[h] * self.d_k
dQ = np.concatenate(dQ, axis=1)
dK = np.concatenate(dK, axis=1)
dV = np.concatenate(dV, axis=1)
return | np.concatenate([dQ, dK, dV], axis=1) | numpy.concatenate |
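# Illustrative sketch (not from the original project): the causal mask used
# above marks positions over the main diagonal, whose logits are pushed to
# -1e12 so softmax sends their weights to ~0.
def _mask_demo():
    n = 3
    mask = (np.tril(np.ones((n, n))) == 0)
    C = np.zeros((n, n))
    C[mask] = -1e12
    assert mask[0, 1] and mask[0, 2] and not mask[1, 0]  # row i attends to j <= i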
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 12:01:18 2015
@author: <NAME>
Abstract dictionary learner.
Includes gradient descent on MSE energy function as a default learning method.
"""
import numpy as np
import pickle
# the try/except block avoids an issue with the cluster
try:
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy.stats import skew
except ImportError:
print('Plotting and modulation plot unavailable.')
import StimSet
class DictLearner(object):
"""Abstract base class for dictionary learner objects. Provides some
default functions for loading data, plotting network properties,
and learning."""
def __init__(self, data, learnrate, nunits,
paramfile=None, theta=0, moving_avg_rate=0.001,
stimshape=None, datatype="image", batch_size=100, pca=None,
store_every=1):
self.nunits = nunits
self.batch_size = batch_size
self.learnrate = learnrate
self.paramfile = paramfile
self.theta = theta
self.moving_avg_rate = moving_avg_rate
self.initialize_stats()
self.store_every = store_every
self._load_stims(data, datatype, stimshape, pca)
self.Q = self.rand_dict()
def initialize_stats(self):
nunits = self.nunits
self.corrmatrix_ave = np.zeros((nunits, nunits))
self.L0hist = np.array([])
self.L1hist = np.array([])
self.L2hist = np.array([])
self.L0acts = np.zeros(nunits)
self.L1acts = | np.zeros(nunits) | numpy.zeros |
import numpy as np
import random
from scipy import stats
from utils import features, class_names
from rcviz import viz
# for split between two integer values
HALF = 0.5
class DecisionTree(object):
class Node(object):
def __init__(self, label):
"""
The node in a decision tree.
Args:
label: The class label of a node.
"""
self.label = label
self.left = None
self.right = None
self.idx = None
self.thresh = None
def set_l(self, node):
"""
Set NODE as current left child.
Args:
node: The left child.
"""
self.left = node
def set_r(self, node):
"""
Set NODE as current right child.
Args:
node: The right child.
"""
self.right = node
def set_idx(self, idx):
"""
Set feature to split.
Args:
idx: The column index of the feature to split.
"""
self.idx = idx
def set_thr(self, thresh):
"""
Set split threshold.
Args:
thresh: The threshold to split the data.
If feature <= threshold, then comes
to the left subtree, else, the right
subtree.
"""
self.thresh = thresh
def __str__(self):
if self.idx is not None and self.thresh is not None:
return str(features[self.idx]) + " thr: " + str(self.thresh)
else:
return "leaf"
def __init__(self, X, y, mode, criteria, seed=1, feature_rate=1):
"""
A decision tree.
Args:
X: The original features to train.
y: The original labels.
            mode: Split criterion, either 'ig' (information gain)
                or 'gini' (Gini index).
criteria: dict, specify the stopping criteria.
            feature_rate: Helper argument for random forests to randomly
                select a subset of the original features.
"""
if mode not in ["ig", "gini"]:
raise ValueError("mode should be either 'ig' or 'gini', "
"but found %s" % mode)
self.tree = None
self.n_features = X.shape[1]
self.n_classes = len(set(y))
self.mode = mode
self.max_depth = criteria.get("max_depth", None)
self.node_purity = criteria.get("node_purity", None)
self.min_gain = criteria.get("min_gain", None)
self.seed = seed
self.feature_rate = feature_rate
def set_criteria(self, criteria):
"""
Change the criteria of current decision tree.
"""
self.max_depth = criteria.get("max_depth", None)
self.node_purity = criteria.get("node_purity", None)
self.min_gain = criteria.get("min_gain", None)
def feature_selector(self):
"""
Return a list of index of features to be considered to
split during training.
"""
idx = list(range(self.n_features))
if self.feature_rate == 1:
return idx
random.seed(self.seed)
feature_idx = random.sample(
idx, int(self.feature_rate * self.n_features))
return sorted(feature_idx)
@staticmethod
def entropy(y):
_, counts = np.unique(y, return_counts=True)
return stats.entropy(counts, base=2)
@staticmethod
def information_gain(X, y, thresh):
en = DecisionTree.entropy(y)
num_d = y.shape[0]
left_indicies = X <= thresh
# left partition
left_sub = y[left_indicies]
en_left = DecisionTree.entropy(left_sub)
en_left = (left_sub.shape[0] / num_d) * en_left
# right partition
right_sub = y[~left_indicies]
en_right = DecisionTree.entropy(right_sub)
en_right = (right_sub.shape[0] / num_d) * en_right
# information gain
ig = en - en_left - en_right
return ig
@staticmethod
def gini_impurity(y):
total = y.shape[0]
_, counts = | np.unique(y, return_counts=True) | numpy.unique |
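# Illustrative sketch (not from the original project): worked numbers for the
# two split criteria. A 50/50 label split has entropy 1 bit; its Gini
# impurity would be 1 - (0.5**2 + 0.5**2) = 0.5, and a pure node scores 0 on both.
def _criteria_demo():
    _, counts = np.unique(np.array([0, 0, 1, 1]), return_counts=True)
    assert np.isclose(stats.entropy(counts, base=2), 1.0)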
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Figure 1: Comparison of true and modelled local DFs.
Created: September 2021
Author: <NAME>
"""
import numpy as np
import sys
import matplotlib.pyplot as plt
from os.path import exists
sys.path.append("../src")
from ml import load_flow_ensemble, calc_DF_ensemble as calc_DF_model
from qdf import create_qdf_ensemble, create_MW_potential
from qdf import calc_DF_ensemble as calc_DF_true
from constants import kpc
from scipy.integrate import trapezoid as trapz
def normalise_DF(f, x1, x2):
"""
Return normalisation of 2D PDF in x1-x2 space, defined by 1D arrays x12.
"""
N = np.size(x1)
norm = trapz(np.array([trapz(f[:, i], x1) for i in range(N)]), x2)
return norm
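# Illustrative sketch (not from the original project): normalise_DF is a
# 2-D trapezoid-rule integral, so a unit-height PDF on [0,1]x[0,1] has norm 1.
def _norm_demo():
    x = np.linspace(0, 1, 101)
    assert np.isclose(normalise_DF(np.ones((101, 101)), x, x), 1.0)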
# set up coordinate arrays
N_px = 128
ones = np.ones((N_px, N_px))
zeros = np.zeros((N_px, N_px))
R0 = 8 * kpc
z0 = 0.
vR0 = 0.
vphi0 = 220000.
vz0 = 0.
Rlim = 1.1 * kpc
zlim = 2.5 * kpc
vlim = 80000
R_arr = np.linspace(R0 - Rlim, R0 + Rlim, N_px)
z_arr = np.linspace(-zlim, zlim, N_px)
vR_arr = np.linspace(vR0 - vlim, vR0 + vlim, N_px)
vphi_arr = np.linspace(vphi0 - vlim, vphi0 + vlim, N_px)
vz_arr = np.linspace(vz0 - vlim, vz0 + vlim, N_px)
dfile = "fig1_data.npz"
if not exists(dfile):
# load flow ensemble
flows = load_flow_ensemble(
flowdir='../flows/fiducial',
inds=np.arange(20), n_dim=5, n_layers=8, n_hidden=64)
# load qDFs
fname = "../data/MAPs.txt"
data = np.loadtxt(fname, skiprows=1)
weights = data[:, 2]
hr = data[:, 3] / 8
sr = data[:, 4] / 220
sz = sr / np.sqrt(3)
hsr = np.ones_like(hr)
hsz = np.ones_like(hr)
mw = create_MW_potential()
qdfs = create_qdf_ensemble(hr, sr, sz, hsr, hsz, pot=mw)
# flow arguments
u_q = kpc
u_p = 100000
q_cen = np.array([8 * kpc, 0, 0.01 * kpc])
p_cen = np.array([0, 220000, 0])
# R-z: evaluate DF
R_grid, z_grid = np.meshgrid(R_arr, z_arr, indexing='ij')
q = np.stack((R_grid, zeros, z_grid), axis=-1)
p = np.stack((vR0 * ones, vphi0 * ones, vz0 * ones), axis=-1)
q = q.reshape((N_px**2, 3))
p = p.reshape((N_px**2, 3))
f_model = calc_DF_model(q, p, u_q, u_p, q_cen, p_cen, flows)
f_model = f_model.reshape((N_px, N_px))
f_true = calc_DF_true(q, p, qdfs, weights)
f_true = f_true.reshape((N_px, N_px))
f_true[np.abs(R_grid - R0) > 1 * kpc] = 0
# normalise
norm_true = normalise_DF(f_true, R_arr, z_arr)
norm_model = normalise_DF(f_model, R_arr, z_arr)
f_true /= norm_true
f_model /= norm_model
# ref value
f_ref = calc_DF_true(q_cen, p_cen, qdfs, weights) / norm_true
f1_model = f_model / f_ref
f1_true = f_true / f_ref
# calculate residuals
with np.errstate(divide='ignore', invalid='ignore'):
res1 = np.divide((f1_model - f1_true), f1_true)
# vR-vphi: evaluate DF
vR_grid, vphi_grid = np.meshgrid(vR_arr, vphi_arr, indexing='ij')
q = np.stack((R0 * ones, zeros, z0 * ones), axis=-1)
p = np.stack((vR_grid, vphi_grid, vz0 * ones), axis=-1)
q = q.reshape((N_px**2, 3))
p = p.reshape((N_px**2, 3))
f_model = calc_DF_model(q, p, u_q, u_p, q_cen, p_cen, flows)
f_model = f_model.reshape((N_px, N_px))
f_true = calc_DF_true(q, p, qdfs, weights)
f_true = f_true.reshape((N_px, N_px))
# normalise
norm_true = normalise_DF(f_true, vR_arr, vphi_arr)
norm_model = normalise_DF(f_model, vR_arr, vphi_arr)
f_true /= norm_true
f_model /= norm_model
# ref value
f_ref = calc_DF_true(q_cen, p_cen, qdfs, weights) / norm_true
f2_model = f_model / f_ref
f2_true = f_true / f_ref
# calculate residuals
with np.errstate(divide='ignore', invalid='ignore'):
res2 = np.divide((f2_model - f2_true), f2_true)
# z-vz: evaluate DF
z_grid, vz_grid = np.meshgrid(z_arr, vz_arr, indexing='ij')
q = np.stack((R0 * ones, zeros, z_grid), axis=-1)
p = np.stack((vR0 * ones, vphi0 * ones, vz_grid), axis=-1)
q = q.reshape((N_px**2, 3))
p = p.reshape((N_px**2, 3))
f_model = calc_DF_model(q, p, u_q, u_p, q_cen, p_cen, flows)
f_model = f_model.reshape((N_px, N_px))
f_true = calc_DF_true(q, p, qdfs, weights)
f_true = f_true.reshape((N_px, N_px))
# normalise
norm_true = normalise_DF(f_true, z_arr, vz_arr)
norm_model = normalise_DF(f_model, z_arr, vz_arr)
f_true /= norm_true
f_model /= norm_model
# ref value
f_ref = calc_DF_true(q_cen, p_cen, qdfs, weights) / norm_true
f3_model = f_model / f_ref
f3_true = f_true / f_ref
# calculate residuals
with np.errstate(divide='ignore', invalid='ignore'):
res3 = np.divide((f3_model - f3_true), f3_true)
np.savez(dfile, f1_true=f1_true, f1_model=f1_model, res1=res1,
f2_true=f2_true, f2_model=f2_model, res2=res2,
f3_true=f3_true, f3_model=f3_model, res3=res3)
else:
data = | np.load(dfile) | numpy.load |
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import pickle
import os
import logging
import numpy as np
import copy
import higher
import random
from poincare_utils import PoincareDistance
class LabelDataset(Dataset):
def __init__(self, N, nnegs=5):
super(LabelDataset, self).__init__()
self.items = N.shape[0]
self.len = N.shape[0]*N.shape[0]
self.N = N
self.nnegs = nnegs
def __len__(self):
return self.len
def __getitem__(self, idx):
if idx >= self.len:
raise StopIteration
t = idx//self.items
h = idx%self.items
negs = np.arange(self.items)[self.N[t][h] == 1.0]
negs = negs.repeat(self.nnegs)
np.random.shuffle(negs)
return torch.tensor([t, h, *negs[:self.nnegs]])
class Synthetic(Dataset):
def __init__(self, drop_prob, size, change_prob):
side = 4
n = side//2
self.means = [(x,y) for x in range(-n,n) for y in range(-n,n)]
self.covs = [0.01 for x in range(-n,n) for y in range(-n,n)]
self.N = np.zeros((21,21))
self.x = []
self.y = []
self.change_prob = change_prob
self.size = size
self.drop_prob = drop_prob
self.per_label = dict(zip(list(range(21)),[0]*21))
self.d_matrix = np.zeros((self.size,21),dtype=float)
for i in range(self.size):
x, y, y_old = self.getitem(i)
self.d_matrix[i,:] = y.numpy()
for ele in y_old:
self.per_label[ele] += 1
self.x.append(x)
self.y.append(y)
print(self.per_label)
logging.info(f"{self.per_label}")
# self.d_matrix = np.transpose(self.d_matrix)
# self.d_matrix_norm = self.d_matrix / np.sum(self.d_matrix,axis=1).reshape(-1,1)
# self.c_matrix = self.d_matrix_norm@(self.d_matrix/np.sum(self.d_matrix,axis=0).reshape(1,-1)).T
# self.c_matrix_p = (self.d_matrix/np.sum(self.d_matrix,axis=0).reshape(1,-1))[email protected]_matrix_norm
# self.delta_p = np.diagonal(self.c_matrix_p).reshape(1,-1)
# self.delta = np.diagonal(self.c_matrix)
# # print(self.c_matrix)
# self.n_c = int(np.trace(self.c_matrix))
# self.power = np.multiply(self.delta,(1-self.delta),np.sum(np.multiply(self.d_matrix,self.delta_p,1-self.delta_p),axis=1).reshape(-1,1))
# print(self.n_c,self.power)
def __len__(self):
return self.size
def __getitem__(self, i):
return self.x[i], self.y[i]
@staticmethod
def sample(mean, cov):
return np.random.multivariate_normal(mean, cov)
def multihot(self, x):
with torch.no_grad():
ret = torch.zeros(21)
for l in x:
ret += nn.functional.one_hot(torch.tensor(l), 21)
return ret
def getitem(self, idx):
i = np.random.randint(len(self.means))
x = Synthetic.sample(self.means[i], self.covs[i]*np.eye(2))
y = [i, 20]
if i==0:
a = random.uniform(0,1)
if a > 1-self.change_prob:
y.append(1)
if i in [0,1,4,5]:
y.append(16)
elif i in [2,3,6,7]:
y.append(17)
elif i in [8,9,12,13]:
y.append(18)
elif i in [10,11,14,15]:
y.append(19)
# Missing Labels here
if | np.random.rand() | numpy.random.rand |
#!/usr/bin/env python
# coding: utf-8
#
# # LMC 3D structure Final Version with Systematics
#
# np.random.choice([Roger,Hector, Alfred,Luis,Angel,Xavi])
# In[ ]:
#######################
#### Load packages ####
#######################
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
# import warnings
import sys
import numpy as np
import pandas as pd
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel
from scipy.optimize import curve_fit
from scipy.stats import gaussian_kde
from scipy.interpolate import Rbf
from scipy.stats import multivariate_normal
from scipy.linalg import pinv
def rbf(X,y,k):
idx = np.random.randint(np.size(X,axis = 0),size = k)
centroids = X[idx,:]
xcross = np.dot(X,X.T)
xnorms = np.repeat(np.diag(np.dot(X,X.T)).reshape(1,-1),np.size(X,axis=0),axis=0)
sigma = np.median(xnorms-2.*xcross+xnorms.T)
n = X.shape[0]
values = []
for x in X:
for c in centroids:
values.append(np.exp(-np.sum((x-c)**2.)/sigma))
phiX = np.reshape(values,(n,k))
psinv = pinv(np.dot(phiX.T,phiX))
w = np.dot(psinv,np.dot(phiX.T,y))
return w,centroids,sigma
def rbf_predict(Xhat,w,centroids,sigma):
n = Xhat.shape[0]
k = centroids.shape[0]
values = []
for x in Xhat:
for c in centroids:
values.append(np.exp(-np.sum((x-c)**2.)/sigma))
phi_Xhat = np.reshape(values,(n,k))
return np.dot(phi_Xhat,w)
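# Illustrative sketch (not from the original project): fit-and-predict round
# trip for the RBF helpers on a toy 1-D regression problem (k centroids are
# sampled from the training rows).
def _rbf_demo():
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, size=(50, 1))
    y = np.sin(3 * X[:, 0])
    w, centroids, sigma = rbf(X, y, k=10)
    yhat = rbf_predict(X, w, centroids, sigma)
    assert yhat.shape == y.shape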
def proper2geo_fn(xyz,distCenterLMC,alphaCenterLMC,deltaCenterLMC,
posAngleLMC,inclAngleLMC):
# Transform samples of location coordinates in the proper frame of the LMC
# to the rectangular heliocentric frame
#
# References:
# <NAME> & Cioni (2001)
# Weinberg and Nikolaev (2001)
#
# Parameters:
# -xyz A tensor of shape=(N, 3) containing N samples in the
# proper LMC frame
# -N No of samples
# -distCenterLMC Distance to the LMC centre (kpc)
# -alphaCenterLMC RA of the LMC centre (rad)
# -deltaCenterLMC Dec of the LMC centre (rad)
# -posAngleLMC Position angle of the LON measured w.r.t. the North (rad)
# -inclAngleLMC Inclination angle (rad)
#
# Return: A tensor of shape=(N, 3) containing N samples of rectangular
# coordinates in the heliocentric frame
# Affine transformation from local LMC frame to heliocentric frame
s11 = np.sin(alphaCenterLMC)
s12 = -np.cos(alphaCenterLMC) * np.sin(deltaCenterLMC)
s13 = -np.cos(alphaCenterLMC) * np.cos(deltaCenterLMC)
s21 = -np.cos(alphaCenterLMC)
s22 = -np.sin(alphaCenterLMC) * np.sin(deltaCenterLMC)
s23 = -np.sin(alphaCenterLMC) * np.cos(deltaCenterLMC)
s31 = np.zeros([])
s32 = np.cos(deltaCenterLMC)
s33 = -np.sin(deltaCenterLMC)
matrix = np.stack((s11,s12,s13,s21,s22,s23,s31,s32,s33),
axis=-1) # pyformat: disable
output_shape = np.concatenate((
np.shape(np.zeros(4))[:-1], (3, 3)), axis=-1)
OXYZ2 = np.reshape(matrix, output_shape.astype(int))
LMC_center = np.stack(
[
distCenterLMC *
np.cos(deltaCenterLMC) *
np.cos(alphaCenterLMC),
distCenterLMC *
np.cos(deltaCenterLMC) *
np.sin(alphaCenterLMC),
distCenterLMC *
np.sin(deltaCenterLMC)
], axis=0)
#print("LMC_center",LMC_center)
# Linear transformation from proper to local LMC frame
s11 = np.cos(posAngleLMC)
s12 = -np.sin(posAngleLMC) * np.cos(inclAngleLMC)
s13 = -np.sin(posAngleLMC) * np.sin(inclAngleLMC)
s21 = np.sin(posAngleLMC)
s22 = np.cos(posAngleLMC) * np.cos(inclAngleLMC)
s23 = np.cos(posAngleLMC) * np.sin(inclAngleLMC)
s31 = np.zeros([])
s32 = -np.sin(inclAngleLMC)
s33 = np.cos(inclAngleLMC)
matrix2 = np.stack((s11,s12,s13,s21,s22,s23,s31,s32,s33),
axis=-1) # pyformat: disable
output_shape = np.concatenate((
np.shape(np.zeros(4))[:-1], (3, 3)), axis=-1)
OXYZ5 = np.reshape(matrix2, output_shape.astype(int))
#mat1=xyz.dot(OXYZ5)
mat1=OXYZ5.dot(xyz.T).T
#print("mat1",mat1.shape)
#print(OXYZ2.shape)
#output0n=mat1.dot(OXYZ2) + np.array(LMC_center)
output0n=OXYZ2.dot(mat1.T).T + np.array(LMC_center)
#print("output0n",output0n)
#mat1 = np.matmul(OXYZ5,xyz) + np.zeros(3)
#mat2 = np.matmul(OXYZ2,mat1) + LMC_center
return output0n
def disk_fn(n, scaleHeight, scaleLength, psiAngle, ellFactor):
# Generate samples of location coordinates of the LMC disk in a proper
# reference frame and transform them to a proper LMC reference frame
# References:
# Mancini et al. (2004)
#
# Parameters:
# -N No of samples
# -scaleHeight Disk scale height (kpc)
# -scaleLength Disk scale length (kpc)
# -ellFactor Disk ellipticity factor. For a circular disk set = 1
# -psiAngle Disk minor axis position angle measured w.r.t. LON (rad)
# For a circular disk set = 0
#
# Return: A tensor of shape=(n, 3) containing N samples of the
# star locations in a local LMC reference frame
s11 = np.cos(psiAngle)
s12 = -np.sin(psiAngle)
s13 = np.zeros([])
s21 = np.sin(psiAngle)
s22 = np.cos(psiAngle)
s23 = np.zeros([])
s31 = np.zeros([])
s32 = np.zeros([])
s33 = | np.ones([]) | numpy.ones |
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
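# Illustrative sketch (not from the original project): factors() returns every
# divisor of n by pairing i with n//i up to sqrt(n).
def _factors_demo():
    assert factors(12) == {1, 2, 3, 4, 6, 12}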
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
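    # data2 is perturbed well within the tolerance and data3 well outside it,
    # so exactly one of the two allclose checks below should hold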
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
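    # for the complex cases below, numpy orders complex values lexicographically
    # (real part first, then imaginary); the test assumes NumCpp uses the same convention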
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
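    # flip the step sign when stop < start so the requested range is non-empty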
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
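    # real arccosh is only defined on [1, inf), hence the +1 offsets on the real-valued inputs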
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
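    # real arctanh is only defined on (-1, 1), so the real-valued inputs are drawn from [0, 1)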
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
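    # duplicate values make argsort index order implementation-dependent, so the
    # tests compare the reordered data rather than the raw index arrays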
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
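    # NumCpp.argwhere appears to return flat indices, so the numpy reference
    # is computed on the flattened mask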
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
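    # unlike array_equal, array_equiv tolerates a shape difference here: cArray2
    # holds the same data as cArray1 reshaped, while cArray3 holds different data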
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
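    # exercises every asarray overload (C arrays, std::vector, std::deque,
    # std::list, iterators, and raw pointers), each with real and complex doubles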
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
    shapeInput = np.random.randint(20, 100, [2, ])
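    # each astype helper should both convert the values and report the expected
    # numpy dtype; the dtype assertions guard against silent no-op casts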
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
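    # casting complex to double drops the imaginary part; numpy emits a
    # ComplexWarning for the reference cast, so it is silenced locally below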
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=np.ComplexWarning)
        assert np.array_equal(cArrayCast, data.astype(np.double))
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
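    # note: for Axis.ROW the weights vector spans the rows (numpy axis=0);
    # for Axis.COL it spans the columns (numpy axis=1)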
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
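    # the representation width is pinned to the full 64 bits so leading zeros
    # match np.binary_repr's padded output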
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
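    # the second argument mirrors numpy's minlength: a value above max(data)
    # pads the histogram with trailing zeros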
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
    weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
    weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
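    # only the shape is asserted here: the swapped values intentionally differ
    # from the input, so no elementwise numpy reference is used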
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
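    # `meas` is presumably scipy.ndimage.measurements, imported at the top of the
    # file; it supplies the reference whole-array and per-axis centers of mass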
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
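    # the single-argument overload should produce a purely real complex value;
    # the two-argument form sets both components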
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
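# concatenate: Axis.NONE joins the flattened inputs; Axis.ROW and Axis.COL map to NumPy's axis=0 and axis=1.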
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
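# conj: scalar and array complex conjugation checked against np.conj to 9 decimal places.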
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
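# contains: Axis.NONE tests whole-array membership; Axis.COL tests each row and Axis.ROW each column (note the data.T loop).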
def test_contains():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
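# copy: a copied array should compare equal to its source data.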
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
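# count_nonzero: Axis.ROW corresponds to NumPy's axis=0 and Axis.COL to axis=1; the real data is drawn from {0, 1, 2} so zeros are likely to occur.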
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
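# cross: 2-element vectors yield the scalar z-component, 3-element vectors yield full vectors; Axis.ROW/COL select which axis holds the vectors.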
def test_cross():
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
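# cube: elementwise x**3, checked against data * data * data.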
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
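# cumprod: shapes and values are kept small, presumably so the running product stays within the integer range.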
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
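# cumsum: cumulative sums over the flattened array and along each axis.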
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
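# deleteIndices: NumCpp.Slice(0, 100, 4) and Python's slice(0, 99, 4) select the same indices (0, 4, ..., 96) on a 100-element axis.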
def test_deleteIndices():
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
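# diag: extracts the k-th diagonal of a 2D array, matching np.diag.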
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
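# diagflat: builds a 2D array with the flattened input placed on the k-th diagonal, matching np.diagflat.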
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
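# diagonal: Axis.ROW matches np.diagonal with axis1=0, axis2=1; Axis.COL swaps the two axes.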
def test_diagonal():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
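# diff: adjacent differences; the unsigned COL case casts the NumCpp result to np.uint32, presumably so wrap-around differences compare equal to NumPy's unsigned arithmetic.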
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
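# divide: zero divisors are replaced or redrawn before dividing so every quotient is well defined; mixed real/complex overloads are covered.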
def test_divide():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
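# dot: for 1-by-N arrays this is the inner product (hence data2.T on the NumPy side); the 2D shapes exercise the matrix product.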
def test_dot():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
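# empty: only shape and size are checked, since the contents of an empty array are uninitialized.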
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
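# exp / exp2 / expm1: scalar and array overloads checked against NumPy to 9 decimal places.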
def test_exp():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_expm1():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
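# eye: 1D, 2D, Shape, and complex overloads with a random diagonal offset k.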
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
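# find: returns the flattened indices at which the boolean mask is true, like np.nonzero on the flattened mask.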
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
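# findN: like find, but truncated to the first n hits (here n = 8).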
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
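# fix: rounds toward zero, matching np.fix for scalars and arrays.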
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
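    # two round trips: dump/fromfile on a binary file, then tofile/fromfile on a
    # '\n'-delimited text file (NumCpp appends a '.txt' extension in text mode)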
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
    tempDir = tempfile.gettempdir()  # use the platform temp dir, consistent with the text round trip below
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
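    # the scalar overload needs either Boost or std::gcd (STL_GCD_LCM);
    # the array reduction is only compiled in when Boost is available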
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
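    # NumCpp.Axis.ROW corresponds to numpy axis=0, Axis.COL to axis=1,
    # and Axis.NONE operates on the flattened array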
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
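    # first overload: a fixed count of equal-width bins; second overload: caller-supplied bin edges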
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(), np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
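    # perturb data1 with ~1e-5 noise and compare using rtol/atol set to numpy's documented defaults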
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
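    # mirrors test_gcd: scalar lcm needs Boost or std::lcm, the array reduction needs Boost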
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
    bitsToShift = np.random.randint(1, 32, [1, ]).item()
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
    cArray.setArray(data)
    assert np.array_equal(NumCpp.left_shift(cArray, bitsToShift).getNumpyArray(),
                          np.left_shift(data, bitsToShift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
def test_max():
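    # complex comparisons are expected to follow numpy's lexicographic ordering
    # (real part first, then imaginary part)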
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
def test_median():
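    # force an odd element count so the median is a single middle element and no
    # averaging of the two central values is involved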
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() == np.median(data, axis=None).item()
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
def test_meshgrid():
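    # NumCpp.meshgrid takes Slice objects; build the equivalent np.arange inputs for comparison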
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
def test_multiply():
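    # exercises every operand combination: array*array, array*scalar, scalar*array,
    # for real, complex, and mixed real/complex inputs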
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
def test_nan_to_num():
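    # seed random flat indices with nan, +inf, and -inf, then check each gets its own replacement value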
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
def test_nanargmax():
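    # roughly 10% of the elements are overwritten with NaN, which the nan-aware argmax must skip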
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
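    # shapes and values are kept small here, presumably so the cumulative product stays representable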
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumprod(data, axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumprod(data, axis=1))
####################################################################################
def test_nancumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumsum(data, axis=None))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumsum(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumsum(data, axis=1))
####################################################################################
def test_nanmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmax(cArray, NumCpp.Axis.NONE).item() == np.nanmax(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmax(data, axis=1))
####################################################################################
def test_nanmean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmean(cArray, NumCpp.Axis.NONE).item() == np.nanmean(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmean(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmean(data, axis=1))
####################################################################################
def test_nanmedian():
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert (NumCpp.nanmedian(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
np.nanmedian(data, axis=None).item())
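# NOTE: the axis-wise (ROW/COL) nanmedian checks below are currently disabled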
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[0].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
# np.nanmedian(data, axis=0))
#
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[1].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
# np.nanmedian(data, axis=1))
####################################################################################
def test_nanmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmin(cArray, NumCpp.Axis.NONE).item() == np.nanmin(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmin(data, axis=1))
####################################################################################
def test_nanpercentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_nanprod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanprod(cArray, NumCpp.Axis.NONE).item() == np.nanprod(data, axis=None)
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nanprod(data, axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nanprod(data, axis=1))
####################################################################################
def test_nans():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.nansSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.nansRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.nansShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
####################################################################################
def test_nans_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.nans_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(np.isnan(cArray2.getNumpyArray())))
####################################################################################
def test_nanstd():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.nanstd(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=1), 9))
####################################################################################
def test_nansum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nansum(cArray, NumCpp.Axis.NONE).item() == np.nansum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nansum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nansum(data, axis=1))
####################################################################################
def test_nanvar():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanvar(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.nanvar(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=1), 8))
####################################################################################
def test_nbytes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 8
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 16
####################################################################################
def test_negative():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
####################################################################################
def test_newbyteorderArray():
value = np.random.randint(1, 100, [1, ]).item()
assert (NumCpp.newbyteorderScaler(value, NumCpp.Endian.BIG) ==
np.asarray([value], dtype=np.uint32).newbyteorder().item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.newbyteorderArray(cArray, NumCpp.Endian.BIG),
data.newbyteorder())
####################################################################################
def test_none():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
####################################################################################
def test_nonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
####################################################################################
def test_norm():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() == np.linalg.norm(data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
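# smoke check only: verifies a value is returned, without comparing to numpy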
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
allPass = True
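# Axis.ROW reduces along rows, producing one norm per column, so iterate
# over the columns of data (i.e. the rows of its transpose)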
for idx, col in enumerate(data.transpose()):
if norms[idx] != np.linalg.norm(col):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.COL).getNumpyArray().flatten()
allPass = True
for idx, row in enumerate(data):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
assert norms is not None
####################################################################################
def test_not_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
####################################################################################
def test_ones():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 1))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
####################################################################################
def test_ones_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(1, 0)))
####################################################################################
def test_outer():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
####################################################################################
def test_pad():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item()
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray = NumCpp.NdArray(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray = NumCpp.NdArrayComplexDouble(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
####################################################################################
def test_partition():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
if not (np.all(row[:kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
if not (np.all(row[:kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
if not (np.all(row[:kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
if not (np.all(row[:kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
####################################################################################
def test_percentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.percentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.percentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.percentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.percentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.percentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_polar():
components = np.random.rand(2).astype(np.double)
assert NumCpp.polarScaler(components[0], components[1])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
magArray = NumCpp.NdArray(shape)
angleArray = NumCpp.NdArray(shape)
mag = np.random.rand(shape.rows, shape.cols)
angle = np.random.rand(shape.rows, shape.cols)
magArray.setArray(mag)
angleArray.setArray(angle)
assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_powerf():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
exponents = np.random.rand(shape.rows, shape.cols) * 3
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
####################################################################################
def test_proj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert NumCpp.projScaler(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cData = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cData.setArray(data)
assert NumCpp.projArray(cData) is not None
####################################################################################
def test_ptp():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.COL).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=1))
####################################################################################
def test_put():
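    # put writes into flattened indices: first a single fill value, then one value per index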
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
    indices = np.asarray(range(numIndices), dtype=np.uint32)
value = np.random.randint(1, 500)
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cIndices.setArray(indices)
NumCpp.put(cArray, cIndices, value)
data.put(indices, value)
assert np.array_equal(cArray.getNumpyArray(), data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), dtype=np.uint32)
values = np.random.randint(1, 500, [numIndices, ])
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cValues = NumCpp.NdArray(1, numIndices)
cIndices.setArray(indices)
cValues.setArray(values)
NumCpp.put(cArray, cIndices, cValues)
data.put(indices, values)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_rad2deg():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.rad2degScaler(value), 9) == np.round(np.rad2deg(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rad2degArray(cArray), 9), np.round(np.rad2deg(data), 9))
####################################################################################
def test_radians():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.radiansScaler(value), 9) == np.round(np.radians(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.radiansArray(cArray), 9), np.round(np.radians(data), 9))
####################################################################################
def test_ravel():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
cArray2 = NumCpp.ravel(cArray)
assert np.array_equal(cArray2.getNumpyArray().flatten(), np.ravel(data))
####################################################################################
def test_real():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.realScaler(value), 9) == np.round(np.real(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.realArray(cArray), 9), np.round(np.real(data), 9))
####################################################################################
def test_reciprocal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
imag = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
####################################################################################
def test_remainder():
    # numpy's remainder follows the sign of the divisor, while C++ std::remainder
    # rounds the quotient to nearest and can go negative, so shift negative
    # results back into range before comparing
values = np.random.rand(2) * 100
values = np.sort(values)
res = NumCpp.remainderScaler(values[1].item(), values[0].item())
if res < 0:
res += values[0].item()
assert np.round(res, 9) == np.round(np.remainder(values[1], values[0]), 9)
    # same sign-convention fix-up as above, applied element-wise for the array overload
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols) * 100 + 10
data2 = data1 - np.random.rand(shape.rows, shape.cols) * 10
cArray1.setArray(data1)
cArray2.setArray(data2)
res = NumCpp.remainderArray(cArray1, cArray2)
res[res < 0] = res[res < 0] + data2[res < 0]
assert np.array_equal(np.round(res, 9), np.round(np.remainder(data1, data2), 9))
####################################################################################
def test_replace():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
####################################################################################
def test_reshape():
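    # reshape accepts a flat size, a Shape object (plus the reshapeList overload),
    # and a -1 sentinel that infers the remaining dimension from the array size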
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = data.size
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(1, newShape))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshapeList(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumCols = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, -1, newNumCols)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(-1, newNumCols))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumRows = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, newNumRows, -1)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(newNumRows, -1))
####################################################################################
def test_resize():
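    # only the resulting shape is asserted here; element contents are not compared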
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeFast(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeSlow(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
####################################################################################
def test_right_shift():
shapeInput = np.random.randint(20, 100, [2, ])
    bitsToShift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    assert np.array_equal(NumCpp.right_shift(cArray, bitsToShift).getNumpyArray(),
                          np.right_shift(data, bitsToShift))
####################################################################################
def test_rint():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.rintScaler(value) == np.rint(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.rintArray(cArray), np.rint(data))
####################################################################################
def test_rms():
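    # numpy has no rms function, so compare against sqrt(mean(square(data)))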
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
####################################################################################
def test_roll():
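    # roll shifts elements circularly: Axis.NONE rolls the flattened array,
    # Axis.ROW/COL roll along numpy axes 0 and 1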
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, data.size, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.NONE).getNumpyArray(),
np.roll(data, amount, axis=None))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.cols, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.ROW).getNumpyArray(),
np.roll(data, amount, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.rows, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.COL).getNumpyArray(),
np.roll(data, amount, axis=1))
####################################################################################
def test_rot90():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(1, 4, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.rot90(cArray, amount).getNumpyArray(), np.rot90(data, amount))
####################################################################################
def test_round():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.roundScaler(value, 10) == np.round(value, 10)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.roundArray(cArray, 9), np.round(data, 9))
####################################################################################
def test_row_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.row_stack(cArray1, cArray2, cArray3, cArray4),
np.row_stack([data1, data2, data3, data4]))
####################################################################################
def test_setdiff1d():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
####################################################################################
def test_shape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.shape().rows == shape.rows and cArray.shape().cols == shape.cols
####################################################################################
def test_sign():
value = np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
value = np.random.randn(1).item() * 100 + 1j * np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
####################################################################################
def test_signbit():
value = np.random.randn(1).item() * 100
assert NumCpp.signbitScaler(value) == np.signbit(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signbitArray(cArray), np.signbit(data))
####################################################################################
def test_sin():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
####################################################################################
def test_sinc():
    value = np.random.randn(1).item()
    assert np.round(NumCpp.sincScaler(value), 9) == np.round(np.sinc(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sincArray(cArray), 9), np.round(np.sinc(data), 9))
####################################################################################
def test_sinh():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
value = np.random.randn(1).item() + 1j * np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randn(shape.rows, shape.cols) + 1j * np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
####################################################################################
def test_size():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.size() == shapeInput.prod().item()
####################################################################################
def test_sort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
####################################################################################
def test_sqrt():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
####################################################################################
def test_square():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
####################################################################################
def test_stack():
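    # stacking along Axis.ROW should match np.vstack, and Axis.COL np.hstack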
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.vstack([data1, data2, data3, data4]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_stdev():
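    # real-valued results are compared against np.std; complex standard deviation
    # is defined differently between the libraries, so those cases only check
    # that a result is produced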
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.stdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.std(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_subtract():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
####################################################################################
def test_sum():
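    # sum over the whole array and along each axis, for real and complex data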
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
####################################################################################
def test_swap():
shapeInput1 = np.random.randint(20, 100, [2, ])
shapeInput2 = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols]).astype(np.double)
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
NumCpp.swap(cArray1, cArray2)
assert (np.array_equal(cArray1.getNumpyArray(), data2) and
np.array_equal(cArray2.getNumpyArray(), data1))
####################################################################################
def test_swapaxes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.swapaxes(cArray).getNumpyArray(), data.T)
####################################################################################
def test_tan():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
####################################################################################
def test_tanh():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
####################################################################################
def test_tile():
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileRectangle(cArray, shapeR.rows, shapeR.cols), np.tile(data, shapeRepeat))
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileShape(cArray, shapeR), np.tile(data, shapeRepeat))
####################################################################################
def test_tofile():
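    # round-trip through a binary file (empty separator) and a text file
    # (newline separator), removing the temp files afterwards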
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.bin')
NumCpp.tofile(cArray, filename, '')
assert os.path.exists(filename)
    data2 = np.fromfile(filename, dtype=np.double).reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.txt')
NumCpp.tofile(cArray, filename, '\n')
assert os.path.exists(filename)
data2 = np.fromfile(filename, dtype=np.double, sep='\n').reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
####################################################################################
def test_toStlVector():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
out = np.asarray(NumCpp.toStlVector(cArray))
assert np.array_equal(out, data.flatten())
####################################################################################
def test_trace():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows, [1, ]).item()
assert np.array_equal(NumCpp.trace(cArray, offset, NumCpp.Axis.ROW), data.trace(offset, axis1=1, axis2=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows, [1, ]).item()
assert np.array_equal(NumCpp.trace(cArray, offset, NumCpp.Axis.COL), data.trace(offset, axis1=0, axis2=1))
####################################################################################
def test_transpose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.transpose(cArray).getNumpyArray(), np.transpose(data))
####################################################################################
def test_trapz():
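    # integrate quadratic sample data against np.trapz, first with a constant
    # spacing dx and then with explicit x coordinates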
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), 1)
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())])
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.NONE).item()
integralPy = np.trapz(data, dx=dx)
assert np.round(integralC, 8) == np.round(integralPy, 8)
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x - coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.ROW).flatten()
integralPy = np.trapz(data, dx=dx, axis=0)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x - coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.COL).flatten()
integralPy = np.trapz(data, dx=dx, axis=1)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(1, np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())])
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.NONE).item()
integralPy = np.trapz(data, x=dx)
assert np.round(integralC, 8) == np.round(integralPy, 8)
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.ROW).flatten()
integralPy = np.trapz(data, x=dx, axis=0)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.COL).flatten()
integralPy = np.trapz(data, x=dx, axis=1)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
####################################################################################
def test_tril():
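    # the square/rectangular builders are compared against np.tri; trilArray
    # masks an existing array like np.tril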
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilSquare(squareSize, offset),
np.tri(squareSize, k=offset))
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilSquareComplex(squareSize, offset),
np.tri(squareSize, k=offset) + 1j * np.zeros([squareSize, squareSize]))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput) // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilRect(shapeInput[0].item(), shapeInput[1].item(), offset),
np.tri(shapeInput[0].item(), shapeInput[1].item(), k=offset))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput) // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilRectComplex(shapeInput[0].item(), shapeInput[1].item(), offset),
np.tri(shapeInput[0].item(), shapeInput[1].item(), k=offset) + 1j * np.zeros(shapeInput))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilArray(cArray, offset),
np.tril(data, k=offset))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilArray(cArray, offset),
np.tril(data, k=offset))
####################################################################################
def test_triu():
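    # the triu builders are checked against the transpose of np.tri with a negated offset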
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuSquare(squareSize, offset),
np.tri(squareSize, k=-offset).T)
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuSquareComplex(squareSize, offset),
np.tri(squareSize, k=-offset).T + 1j * np.zeros([squareSize, squareSize]))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput), [1, ]).item()
# NOTE: numpy triu appears to have a bug... just check that NumCpp runs without error
assert NumCpp.triuRect(shapeInput[0].item(), shapeInput[1].item(), offset) is not None
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput), [1, ]).item()
# NOTE: numpy triu appears to have a bug... just check that NumCpp runs without error
assert NumCpp.triuRectComplex(shapeInput[0].item(), shapeInput[1].item(), offset) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuArray(cArray, offset), np.triu(data, k=offset))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuArray(cArray, offset), np.triu(data, k=offset))
####################################################################################
def test_trim_zeros():
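    # 'f' trims leading zeros, 'b' trims trailing zeros, 'fb' trims both ends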
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'f').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'f'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'f').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'f'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'b').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'b'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'b').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'b'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'fb').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'fb'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'fb').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'fb'))
####################################################################################
def test_trunc():
value = np.random.rand(1).item() * np.pi
assert NumCpp.truncScaler(value) == np.trunc(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(NumCpp.truncArray(cArray), np.trunc(data))
####################################################################################
def test_union1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.union1d(cArray1, cArray2).getNumpyArray().flatten(), np.union1d(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.union1d(cArray1, cArray2).getNumpyArray().flatten(), np.union1d(data1, data2))
####################################################################################
def test_unique():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.unique(cArray).getNumpyArray().flatten(), np.unique(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.unique(cArray).getNumpyArray().flatten(), np.unique(data))
####################################################################################
def test_unwrap():
value = np.random.randn(1).item() * 3 * np.pi
assert np.round(NumCpp.unwrapScaler(value), 9) == np.round(np.arctan2(np.sin(value), np.cos(value)), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.unwrapArray(cArray), 9), np.round(np.arctan2(np.sin(data), np.cos(data)), 9))
####################################################################################
def test_var():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.var(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.var(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.var(cArray, NumCpp.Axis.NONE) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.var(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.var(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.var(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.var(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.var(data, axis=1), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.var(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_vstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.vstack(cArray1, cArray2, cArray3, cArray4),
np.vstack([data1, data2, data3, data4]))
####################################################################################
def test_where():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
cArrayA = NumCpp.NdArray(shape)
cArrayB = NumCpp.NdArray(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
dataA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = np.random.randint(1, 100, [shape.rows, shape.cols])
cArrayMask.setArray(dataMask)
cArrayA.setArray(dataA)
cArrayB.setArray(dataB)
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
cArrayA = NumCpp.NdArrayComplexDouble(shape)
realA = np.random.randint(1, 100, [shape.rows, shape.cols])
imagA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataA = realA + 1j * imagA
cArrayA.setArray(dataA)
cArrayB = NumCpp.NdArrayComplexDouble(shape)
realB = np.random.randint(1, 100, [shape.rows, shape.cols])
imagB = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = realB + 1j * imagB
cArrayB.setArray(dataB)
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
cArrayA = NumCpp.NdArray(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
dataA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = np.random.randint(1, 100)
cArrayMask.setArray(dataMask)
cArrayA.setArray(dataA)
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, dataB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
cArrayA = NumCpp.NdArrayComplexDouble(shape)
realA = np.random.randint(1, 100, [shape.rows, shape.cols])
imagA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataA = realA + 1j * imagA
cArrayA.setArray(dataA)
realB = np.random.randint(1, 100)
imagB = np.random.randint(1, 100)
dataB = realB + 1j * imagB
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, dataB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
cArrayB = NumCpp.NdArray(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
dataB = np.random.randint(1, 100, [shape.rows, shape.cols])
dataA = np.random.randint(1, 100)
cArrayMask.setArray(dataMask)
cArrayB.setArray(dataB)
assert np.array_equal(NumCpp.where(cArrayMask, dataA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
cArrayB = NumCpp.NdArrayComplexDouble(shape)
realB = np.random.randint(1, 100, [shape.rows, shape.cols])
imagB = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = realB + 1j * imagB
cArrayB.setArray(dataB)
realA = np.random.randint(1, 100)
imagA = np.random.randint(1, 100)
dataA = realA + 1j * imagA
assert np.array_equal(NumCpp.where(cArrayMask, dataA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
dataB = np.random.randint(1, 100)
dataA = np.random.randint(1, 100)
assert np.array_equal(NumCpp.where(cArrayMask, dataA, dataB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
realB = | np.random.randint(1, 100) | numpy.random.randint |
from typing import Union, Optional
import pytest
import scanpy as sc
import cellrank.external as cre
from anndata import AnnData
from cellrank.tl.kernels import ConnectivityKernel
from cellrank.external.kernels._utils import MarkerGenes
from cellrank.external.kernels._wot_kernel import LastTimePoint
import numpy as np
import pandas as pd
from scipy.sparse import spmatrix, csr_matrix
from pandas.core.dtypes.common import is_categorical_dtype
from matplotlib.cm import get_cmap
from matplotlib.colors import to_hex
class TestOTKernel:
def test_no_connectivities(self, adata_large: AnnData):
del adata_large.obsp["connectivities"]
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
assert ok._conn is None
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
def test_method_not_implemented(self, adata_large: AnnData):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
with pytest.raises(
NotImplementedError, match="Method `'unbal'` is not yet implemented."
):
ok.compute_transition_matrix(1, 0.001, method="unbal")
def test_no_terminal_states(self, adata_large: AnnData):
with pytest.raises(RuntimeError, match="Unable to initialize the kernel."):
cre.kernels.StationaryOTKernel(
adata_large,
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
def test_normal_run(self, adata_large: AnnData):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
assert isinstance(ok, cre.kernels.StationaryOTKernel)
assert isinstance(ok._transition_matrix, csr_matrix)
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
assert isinstance(ok.params, dict)
@pytest.mark.parametrize("connectivity_kernel", (None, ConnectivityKernel))
def test_compute_projection(
self, adata_large: AnnData, connectivity_kernel: Optional[ConnectivityKernel]
):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
if connectivity_kernel is not None:
ck = connectivity_kernel(adata_large).compute_transition_matrix()
combined_kernel = 0.9 * ok + 0.1 * ck
combined_kernel.compute_transition_matrix()
else:
combined_kernel = ok
expected_error = (
r"<StationaryOTKernel> is not a kNN based kernel. The embedding projection "
r"only works for kNN based kernels."
)
with pytest.raises(AttributeError, match=expected_error):
combined_kernel.compute_projection()
class TestWOTKernel:
def test_no_connectivities(self, adata_large: AnnData):
del adata_large.obsp["connectivities"]
ok = cre.kernels.WOTKernel(
adata_large, time_key="age(days)"
).compute_transition_matrix()
assert ok._conn is None
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
def test_invalid_solver_kwargs(self, adata_large: AnnData):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
ok.compute_transition_matrix(foo="bar")
def test_inversion_updates_adata(self, adata_large: AnnData):
key = "age(days)"
ok = cre.kernels.WOTKernel(adata_large, time_key=key)
assert is_categorical_dtype(adata_large.obs[key])
assert adata_large.obs[key].cat.ordered
np.testing.assert_array_equal(ok.experimental_time, adata_large.obs[key])
orig_time = ok.experimental_time
ok = ~ok
inverted_time = ok.experimental_time
assert is_categorical_dtype(adata_large.obs[key])
assert adata_large.obs[key].cat.ordered
np.testing.assert_array_equal(ok.experimental_time, adata_large.obs[key])
np.testing.assert_array_equal(
orig_time.cat.categories, inverted_time.cat.categories
)
np.testing.assert_array_equal(orig_time.index, inverted_time.index)
with pytest.raises(AssertionError):
np.testing.assert_array_equal(orig_time, inverted_time)
@pytest.mark.parametrize("cmap", ["inferno", "viridis"])
def test_update_colors(self, adata_large: AnnData, cmap: str):
ckey = "age(days)_colors"
_ = cre.kernels.WOTKernel(adata_large, time_key="age(days)", cmap=cmap)
colors = adata_large.uns[ckey]
cmap = get_cmap(cmap)
assert isinstance(colors, np.ndarray)
assert colors.shape == (2,)
np.testing.assert_array_equal(colors, [to_hex(cmap(0)), to_hex(cmap(cmap.N))])
@pytest.mark.parametrize("cmat", [None, "Ms", "X_pca", "good_shape", "bad_shape"])
def test_cost_matrices(self, adata_large: AnnData, cmat: str):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
if isinstance(cmat, str) and "shape" in cmat:
cost_matrices = {
(12.0, 35.0): np.random.normal(size=(73 + ("bad" in cmat), 127))
}
else:
cost_matrices = cmat
if cmat == "bad_shape":
with pytest.raises(ValueError, match=r"Expected cost matrix for time pair"):
ok.compute_transition_matrix(cost_matrices=cost_matrices)
else:
ok = ok.compute_transition_matrix(cost_matrices=cost_matrices)
param = ok.params["cost_matrices"]
if cmat == "Ms":
assert param == "layer:Ms"
elif cmat == "X_pca":
assert param == "obsm:X_pca"
elif cmat == "good_shape":
            # careful: param is an `nstr` sentinel string that never compares equal, hence the str() comparison below
assert str(param) == "precomputed"
else:
assert param == "default"
@pytest.mark.parametrize("n_iters", [3, 5])
def test_growth_rates(self, adata_large: AnnData, n_iters: int):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
ok = ok.compute_transition_matrix(growth_iters=n_iters)
assert isinstance(ok.growth_rates, pd.DataFrame)
np.testing.assert_array_equal(adata_large.obs_names, ok.growth_rates.index)
np.testing.assert_array_equal(
ok.growth_rates.columns, [f"g{i}" for i in range(n_iters + 1)]
)
np.testing.assert_array_equal(
adata_large.obs["estimated_growth_rates"], ok.growth_rates[f"g{n_iters}"]
)
assert ok.params["growth_iters"] == n_iters
@pytest.mark.parametrize("key_added", [None, "gr"])
def test_birth_death_process(self, adata_large: AnnData, key_added: Optional[str]):
np.random.seed(42)
adata_large.obs["foo"] = np.random.normal(size=(adata_large.n_obs,))
adata_large.obs["bar"] = np.random.normal(size=(adata_large.n_obs,))
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
gr = ok.compute_initial_growth_rates("foo", "bar", key_added=key_added)
if key_added is None:
assert isinstance(gr, pd.Series)
np.testing.assert_array_equal(gr.index, adata_large.obs_names)
else:
assert gr is None
assert "gr" in adata_large.obs
@pytest.mark.parametrize("ltp", list(LastTimePoint))
def test_last_time_point(self, adata_large: AnnData, ltp: LastTimePoint):
key = "age(days)"
ok = cre.kernels.WOTKernel(adata_large, time_key=key).compute_transition_matrix(
last_time_point=ltp,
conn_kwargs={"n_neighbors": 11},
threshold=None,
)
ixs = np.where(adata_large.obs[key] == 35.0)[0]
T = ok.transition_matrix[ixs, :][:, ixs].A
if ltp == LastTimePoint.UNIFORM:
np.testing.assert_allclose(T, np.ones_like(T) / float(len(ixs)))
elif ltp == LastTimePoint.DIAGONAL:
np.testing.assert_allclose(T, np.eye(len(ixs)))
elif ltp == LastTimePoint.CONNECTIVITIES:
adata_subset = adata_large[adata_large.obs[key] == 35.0]
sc.pp.neighbors(adata_subset, n_neighbors=11)
T_actual = (
ConnectivityKernel(adata_subset)
.compute_transition_matrix()
.transition_matrix.A
)
np.testing.assert_allclose(T, T_actual)
@pytest.mark.parametrize("organism", ["human", "mouse"])
def test_compute_scores_default(self, adata_large: AnnData, organism: str):
pk, ak = "p_score", "a_score"
if organism == "human":
adata_large.var_names = adata_large.var_names.str.upper()
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
assert pk not in ok.adata.obs
assert ak not in ok.adata.obs
ok.compute_initial_growth_rates(
organism=organism, proliferation_key=pk, apoptosis_key=ak, use_raw=False
)
assert pk in ok.adata.obs
assert ak in ok.adata.obs
def test_normal_run(self, adata_large: AnnData):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
ok = ok.compute_transition_matrix()
assert isinstance(ok, cre.kernels.WOTKernel)
assert isinstance(ok._transition_matrix, csr_matrix)
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
assert isinstance(ok.params, dict)
assert isinstance(ok.growth_rates, pd.DataFrame)
assert isinstance(ok.transport_maps, dict)
np.testing.assert_array_equal(adata_large.obs_names, ok.growth_rates.index)
np.testing.assert_array_equal(ok.growth_rates.columns, ["g0", "g1"])
assert isinstance(ok.transport_maps[12.0, 35.0], AnnData)
assert ok.transport_maps[12.0, 35.0].X.dtype == np.float64
@pytest.mark.parametrize("threshold", [None, 90, 100, "auto"])
def test_threshold(self, adata_large, threshold: Optional[Union[int, str]]):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
ok = ok.compute_transition_matrix(threshold=threshold)
assert isinstance(ok._transition_matrix, csr_matrix)
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
assert ok.params["threshold"] == threshold
if threshold == 100:
for row in ok.transition_matrix:
np.testing.assert_allclose(row.data, 1.0 / len(row.data))
def test_copy(self, adata_large: AnnData):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
ok = ok.compute_transition_matrix()
ok2 = ok.copy()
assert isinstance(ok2, cre.kernels.WOTKernel)
assert ok is not ok2
| np.testing.assert_array_equal(ok.transition_matrix.A, ok2.transition_matrix.A) | numpy.testing.assert_array_equal |
from math import fabs
import numpy as np
from numba import jit
from numba.extending import overload
@overload(np.clip)
def np_clip(a, a_min, a_max, out=None):
    """
    Numba Overload of np.clip
    :type a: np.ndarray
    :type a_min: int
    :type a_max: int
    :type out: np.ndarray
    :rtype: np.ndarray
    """
    # an @overload function is called with *types* at compile time and must
    # return the implementation that numba will compile for those types
    def np_clip_impl(a, a_min, a_max, out=None):
        if out is None:
            out = np.empty_like(a)
        for i in range(len(a)):
            if a[i] < a_min:
                out[i] = a_min
            elif a[i] > a_max:
                out[i] = a_max
            else:
                out[i] = a[i]
        return out
    return np_clip_impl
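# Hedged usage sketch (the helper name and sample values are illustrative
# assumptions, not part of the original module): inside a nopython-jitted
# function, np.clip now dispatches to the overload registered above.
@jit(nopython=True)
def _clip_demo(values):
    # clip every element of `values` into [0.0, 1.0]
    return np.clip(values, 0.0, 1.0)
# e.g. _clip_demo(np.array([-0.5, 0.3, 1.7])) -> array([0. , 0.3, 1. ])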
@jit(nopython=True)
def convolve(data, kernel):
"""
Convolution 1D Array
:type data: np.ndarray
:type kernel: np.ndarray
:rtype: np.ndarray
"""
size_data = len(data)
size_kernel = len(kernel)
size_out = size_data - size_kernel + 1
    out = np.full(size_out, np.nan)  # np.array(list) is not nopython-safe
    kernel = kernel[::-1]            # reversed kernel for the convolution
    for i in range(size_out):
        window = data[i:i + size_kernel]
        out[i] = np.sum(window * kernel)
return out
@jit(nopython=True)
def sma(data, period):
"""
Simple Moving Average
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(data)
    out = np.full(size, np.nan)  # nopython-safe NaN initialisation
for i in range(period - 1, size):
window = data[i - period + 1:i + 1]
out[i] = | np.mean(window) | numpy.mean |
"""Orthogonal matching pursuit algorithms
"""
# Author: <NAME>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import array2d, as_float_array
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.arrayfuncs import solve_triangular
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
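        # pick the atom most correlated with the current residual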
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active]
else:
return gamma, indices[:n_active]
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = | np.empty(0) | numpy.empty |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Export PINNs (Schrodinger) model"""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import (Tensor, context, export, load_checkpoint,
load_param_into_net)
from src.Schrodinger.net import PINNs
def export_sch(num_neuron, N0, Nb, Nf, ck_file, export_format, export_name):
"""
export PINNs for Schrodinger model
Args:
num_neuron (int): number of neurons for fully connected layer in the network
N0 (int): number of data points sampled from the initial condition,
0<N0<=256 for the default NLS dataset
Nb (int): number of data points sampled from the boundary condition,
0<Nb<=201 for the default NLS dataset. Size of training set = N0+2*Nb
Nf (int): number of collocation points, collocation points are used
to calculate regularizer for the network from Schoringer equation.
0<Nf<=51456 for the default NLS dataset
ck_file (str): path for checkpoint file
export_format (str): file format to export
export_name (str): name of exported file
"""
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
layers = [2, num_neuron, num_neuron, num_neuron, num_neuron, 2]
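    # assumed layout: 2 inputs (x, t) -> 4 hidden layers -> 2 outputs,
    # presumably the real and imaginary parts (u, v) of the wave function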
lb = np.array([-5.0, 0.0])
ub = | np.array([5.0, np.pi/2]) | numpy.array |
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_validate
from mlutil.eval import TimeSeriesSplit
def test_dummy():
assert True
def test_TimeSeriesSplit():
X = np.vstack([ | np.random.normal(size=100) | numpy.random.normal |
import random
from numpy import array
from automaton import RuleParser
from automaton.CellState import CellState
from automaton.ProcessingFunction import get_neural_processing_function_bundle
from automaton.SimpleProcessor import SimpleProcessor
from automaton.World import World
from neural.SimpleLayeredNeuralNetwork import SimpleLayeredNeuralNetwork
from neural.SimpleNeuralNetwork import SimpleNeuralNetwork
from util.training import load_training_set, reduce_training_set
def learn_from_file(file_learn, file_output=None, cycles=10000, reduce=False, multi=False, neural_num=3):
training_set_tmp = load_training_set(file_learn)
if reduce:
training_set_tmp = reduce_training_set(training_set_tmp)
if multi:
network = SimpleLayeredNeuralNetwork()
network.add_layer(neural_num, len(training_set_tmp[0][0]))
network.add_layer(1, neural_num)
layers = network.layers
else:
network = SimpleNeuralNetwork(len(training_set_tmp[0][0]))
layers = network.synaptic_weights
network.print_weights()
network.train( | array(training_set_tmp[0]) | numpy.array |
# log-likelihoods for pymc3.
# requires theano
import numpy as np
from scipy.optimize._numdiff import approx_derivative
import theano.tensor as tt
class _LogLikeWithGrad(tt.Op):
# Theano op for calculating a log-likelihood
itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)
def __init__(self, loglike):
# add inputs as class attributes
self.likelihood = loglike
# initialise the gradient Op (below)
self.logpgrad = _LogLikeGrad(self.likelihood)
def perform(self, node, inputs, outputs):
# the method that is used when calling the Op
(theta,) = inputs # this will contain my variables
# call the log-likelihood function
logl = self.likelihood(theta)
outputs[0][0] = | np.array(logl) | numpy.array |
from __future__ import absolute_import, division, print_function
from six.moves import map
import numpy as np
import matplotlib as mpl
import utool as ut
#(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[custom_constants]', DEBUG=False)
ut.noinject(__name__, '[custom_constants]')
# GENERAL FONTS
SMALLEST = 6
SMALLER = 8
SMALL = 10
MED = 12
LARGE = 14
LARGER = 18
#fpargs = dict(family=None, style=None, variant=None, stretch=None, fname=None)
def FontProp(*args, **kwargs):
r""" overwrite fontproperties with custom settings
Kwargs:
fname=u'',
name=u'',
style=u'normal',
variant=u'normal',
weight=u'normal',
stretch=u'normal',
size=u'medium'
"""
kwargs['family'] = 'monospace'
font_prop = mpl.font_manager.FontProperties(*args, **kwargs)
return font_prop
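# Hedged usage example (the name `_title_prop` is an editorial assumption):
# FontProp forwards to matplotlib's FontProperties with family='monospace'
# pre-set, so weight/size are the usual FontProperties keywords.
_title_prop = FontProp(weight='bold', size=LARGE)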
FONTS = ut.DynStruct()
FONTS.smallest = FontProp(weight='light', size=SMALLEST)
FONTS.small = FontProp(weight='light', size=SMALL)
FONTS.smaller = FontProp(weight='light', size=SMALLER)
FONTS.med = FontProp(weight='light', size=MED)
FONTS.large = FontProp(weight='light', size=LARGE)
FONTS.medbold = FontProp(weight='bold', size=MED)
FONTS.largebold = FontProp(weight='bold', size=LARGE)
# SPECIFIC FONTS
if False:
# personal
FONTS.legend = FONTS.small
FONTS.figtitle = FONTS.med
FONTS.axtitle = FONTS.small
FONTS.subtitle = FONTS.med
#FONTS.xlabel = FONTS.smaller
FONTS.xlabel = FONTS.small
FONTS.ylabel = FONTS.small
FONTS.relative = FONTS.smallest
else:
# personal
FONTS.legend = FONTS.med
FONTS.figtitle = FONTS.large
FONTS.axtitle = FONTS.med
FONTS.subtitle = FONTS.med
#FONTS.xlabel = FONTS.smaller
FONTS.xlabel = FONTS.med
FONTS.ylabel = FONTS.med
FONTS.relative = FONTS.med
# COLORS
RED = np.array((255, 0, 0, 255)) / 255.0
YELLOW = np.array((255, 255, 0, 255)) / 255.0
GREEN = np.array(( 0, 255, 0, 255)) / 255.0
CYAN = np.array(( 0, 255, 255, 255)) / 255.0
BLUE = np.array(( 0, 0, 255, 255)) / 255.0
MAGENTA = np.array((255, 0, 255, 255)) / 255.0
ORANGE = np.array((255, 127, 0, 255)) / 255.0
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
WHITE = np.array((255, 255, 255, 255)) / 255.0
GRAY = np.array((127, 127, 127, 255)) / 255.0
LIGHTGRAY = np.array((220, 220, 220, 255)) / 255.0
DEEP_PINK = np.array((255, 20, 147, 255)) / 255.0
PINK = np.array((255, 100, 100, 255)) / 255.0
LIGHT_PINK = np.array((255, 200, 200, 255)) / 255.0
FALSE_RED = np.array((255, 51, 0, 255)) / 255.0
TRUE_GREEN = np.array(( 0, 255, 0, 255)) / 255.0
# TRUE_BLUE = np.array(( 0, 255, 255, 255)) / 255.0
TRUE_BLUE = np.array(( 0, 115, 207, 255)) / 255.0
DARK_GREEN = np.array(( 0, 127, 0, 255)) / 255.0
DARK_BLUE = np.array(( 0, 0, 127, 255)) / 255.0
DARK_RED = np.array((127, 0, 0, 255)) / 255.0
DARK_ORANGE = np.array((127, 63, 0, 255)) / 255.0
DARK_YELLOW = np.array((127, 127, 0, 255)) / 255.0
PURPLE = np.array((102, 0, 153, 255)) / 255.0
BRIGHT_PURPLE = np.array((255, 0, 255, 255)) / 255.0
LIGHT_PURPLE = np.array((255, 102, 255, 255)) / 255.0
BRIGHT_GREEN = np.array(( 39, 255, 20, 255)) / 255.0
PURPLE2 = np.array((150, 51, 200, 255)) / 255.0
LIGHT_BLUE = np.array((102, 100, 255, 255)) / 255.0
LIGHT_GREEN = np.array((102, 255, 102, 255)) / 255.0
NEUTRAL = np.array((225, 229, 231, 255)) / 255.0
NEUTRAL_BLUE = np.array((159, 159, 241, 255)) / 255.0
UNKNOWN_PURP = PURPLE
# GOLDEN RATIOS
PHI_numer = 1 + | np.sqrt(5) | numpy.sqrt |
#!/usr/bin/env python
# coding: utf-8
# ## Model Training and Evaluation
# ### Load Preprocessed data
# In[1]:
# retrieve the preprocessed data from previous notebook
get_ipython().run_line_magic('store', '-r x_train')
get_ipython().run_line_magic('store', '-r x_test')
get_ipython().run_line_magic('store', '-r y_train')
get_ipython().run_line_magic('store', '-r y_test')
get_ipython().run_line_magic('store', '-r yy')
get_ipython().run_line_magic('store', '-r le')
# ### Initial model architecture - MLP
#
# We will start with constructing a Multilayer Perceptron (MLP) Neural Network using Keras and a Tensorflow backend.
#
# Starting with a `sequential` model so we can build the model layer by layer.
#
# We will begin with a simple model architecture consisting of three `dense` layers: two hidden layers and an output layer. The `dense` layer is a standard layer type that works well in many cases for neural networks.
#
# The first layer will receive the input shape. As each sample contains 40 MFCCs (or columns), we have a shape of (1x40), which means we will start with an input shape of 40.
#
# The first two layers will have 256 nodes. The activation function we will be using for our first 2 layers is the `ReLU`, or `Rectified Linear Activation`. This activation function has been proven to work well in neural networks.
#
# We will also apply a `Dropout` value of 50% on our first two layers. This will randomly exclude nodes from each update cycle which in turn results in a network that is capable of better generalisation and is less likely to overfit the training data.
#
# Our output layer will have 10 nodes (num_labels) which matches the number of possible classifications. The activation function for our output layer is `softmax`. Softmax makes the output sum up to 1 so the output can be interpreted as probabilities. The model will then make its prediction based on which option has the highest probability.
# In[2]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
num_labels = yy.shape[1]
filter_size = 2
# Construct model
model = Sequential()
model.add(Dense(256, input_shape=(40,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_labels))
model.add(Activation('softmax'))
# ### Compiling the model
#
# For compiling our model, we will use the following three parameters:
#
# * Loss function - we will use `categorical_crossentropy`. This is the most common choice for classification. A lower score indicates that the model is performing better.
#
# * Metrics - we will use the `accuracy` metric which will allow us to view the accuracy score on the validation data when we train the model.
#
# * Optimizer - here we will use `adam` which is a generally good optimizer for many use cases.
#
# In[3]:
# Compile the model
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
# In[4]:
# Display model architecture summary
model.summary()
# Calculate pre-training accuracy
score = model.evaluate(x_test, y_test, verbose=0)
accuracy = 100*score[1]
print("Pre-training accuracy: %.4f%%" % accuracy)
# ### Training
#
# Here we will train the model.
#
# We will start with 100 epochs, which is the number of times the model will cycle through the training data. The model should improve on each cycle until validation performance stops improving.
#
# We will also start with a low batch size, as having a large batch size can reduce the generalisation ability of the model.
# In[5]:
from keras.callbacks import ModelCheckpoint
from datetime import datetime
num_epochs = 100
num_batch_size = 32
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.basic_mlp.hdf5',
verbose=1, save_best_only=True)
start = datetime.now()
model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(x_test, y_test), callbacks=[checkpointer], verbose=1)
duration = datetime.now() - start
print("Training completed in time: ", duration)
# ### Test the model
#
# Here we will review the accuracy of the model on both the training and test data sets.
# In[6]:
# Evaluating the model on the training and testing set
score = model.evaluate(x_train, y_train, verbose=0)
print("Training Accuracy: ", score[1])
score = model.evaluate(x_test, y_test, verbose=0)
print("Testing Accuracy: ", score[1])
# The initial Training and Testing accuracy scores are quite high. As there is only a small difference between the Training and Test scores (~5%), this suggests that the model has not overfit the training data.
# ### Predictions
#
# Here we will build a method which will allow us to test the model's predictions on a specified audio .wav file.
# In[7]:
import librosa
import numpy as np
def extract_feature(file_name):
try:
audio_data, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
mfccs = librosa.feature.mfcc(y=audio_data, sr=sample_rate, n_mfcc=40)
mfccsscaled = np.mean(mfccs.T,axis=0)
    except Exception as e:
        print("Error encountered while parsing file: ", file_name)
        return None
return np.array([mfccsscaled])
# In[8]:
def print_prediction(file_name):
prediction_feature = extract_feature(file_name)
predicted_vector = model.predict_classes(prediction_feature)
predicted_class = le.inverse_transform(predicted_vector)
print("The predicted class is:", predicted_class[0], '\n')
predicted_proba_vector = model.predict_proba(prediction_feature)
predicted_proba = predicted_proba_vector[0]
for i in range(len(predicted_proba)):
category = le.inverse_transform( | np.array([i]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 23:07:56 2017
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as scipylin
def bvpinit(A, mesh, degree, Proj_L, B_L, Proj_R, B_R):
# Begin with some error checking to make sure that everything will work
# Check that A and the projection conditions are callable. If not (i.e.
# they are arrays), then make them callable so that the code in solve can
# treat them as if they are callable objects. This way, the user can
# either pass in arrays or functions
if not callable(A):
A_copy = A.copy()
def A(x): return A_copy
if not callable(Proj_L):
Proj_L_copy = Proj_L.copy()
def Proj_L(x): return Proj_L_copy
if not callable(Proj_R):
Proj_R_copy = Proj_R.copy()
def Proj_R(x): return Proj_R_copy
if not callable(B_L):
B_L_copy = B_L.copy()
def B_L(x): return B_L_copy
if not callable(B_R):
B_R_copy = B_R.copy()
def B_R(x): return B_R_copy
# Get our system dimensions, and the number of intervals
sys_dim = len(A(mesh[0]))
num_intervals = len(mesh)-1
# Check that mesh is in increasing order
    if np.any(mesh != sorted(mesh)):
raise ValueError("To initialize cheby_bvp, mesh must be in order from"+
" least to greatest.")
# Check that A returns a square shape
    dimA = A(mesh[0]).shape
if (len(dimA) != 2) or (dimA[0] != dimA[1]):
raise ValueError("To initialize cheby_bvp, A must be square, but "+
"has shape "+str(dimA))
# Check that Proj_L and Proj_R return arrays that have 2 dimensions
dimPR = Proj_R(mesh[-1]).shape
dimPL = Proj_L(mesh[0]).shape
if len(dimPR) != 2 or len(dimPL) != 2:
raise ValueError("To initialize cheby_bvp, Proj_L and Proj_R must"+
" have 2 dimensions. However, Proj_L has "+str(len(dimPL))+
" dimensions, and Proj_R has "+str(len(dimPR))+" dimensions.")
# Check that the number of boundary conditions do not exceed the dimensions
# of our system
dimL = B_L(mesh[0]).shape[0]
dimR = B_R(mesh[-1]).shape[0]
if dimL + dimR != sys_dim:
raise ValueError("""Cannot initialize cheby_bvp because the system's
boundary conditions are overdetermined or underdetermined.
You must have len(B_L)+len(B_R) == sys_dim, where sys_dim is
A.shape[0] (e.g. if A is a 4x4, len(B_L)+len(B_R) = 4) Currently,
you have len(B_L) = """+str(dimL)+", len(B_R) = "+str(dimR)+""",
and sys_dim = """+str(sys_dim)+".")
# Initialize d, the dict with subinterval information
d = Struct(
{ 'A': A,
'Proj_L': Proj_L,
'B_L': B_L,
'Proj_R': Proj_R,
'B_R': B_R,
'N': np.zeros((num_intervals), dtype=np.intp), #Is this field necessary?
'a': np.zeros((num_intervals)),
'b': np.zeros((num_intervals)),
'theta': np.zeros((num_intervals, degree)),
'nodes_0': np.zeros((num_intervals, degree)), #Is this field necessary?
'nodes': np.zeros((num_intervals, degree)),
'Tcf': np.zeros((num_intervals, degree, degree),dtype=np.complex),
'Ta': np.zeros((num_intervals, degree),dtype=np.complex),
'Tb': np.zeros((num_intervals, degree),dtype=np.complex),
'Ta_x': np.zeros((num_intervals, degree),dtype=np.complex),
'Tb_x': np.zeros((num_intervals, degree),dtype=np.complex),
'T': np.zeros((num_intervals, degree, degree),dtype=np.complex),
'T_x': np.zeros((num_intervals, degree, degree),dtype=np.complex),
'T_xcf': np.zeros((num_intervals, degree, degree),dtype=np.complex),
'cf': np.zeros((num_intervals, sys_dim, degree),dtype=np.complex),
'cfx': np.zeros((num_intervals, sys_dim, degree),dtype=np.complex),
'err': np.zeros((num_intervals)),
'x': [],
'y': [],
'dim': sys_dim
})
# Filling in subinterval values [a,b]
d['a'] = mesh[:-1]
d['b'] = mesh[1:]
# Degree of polynomials
d['N'].fill(degree)
out = cheby_bvp(d)
return out
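# Hedged usage sketch (assumed, not part of the original module): solve
# y'' = -y on [0, pi/2] with y(0) = 0 and y(pi/2) = 1, rewritten as the
# first-order system u' = A u for u = (y, y'), with one projected boundary
# condition at each end so that len(B_L) + len(B_R) == sys_dim == 2.
def _bvpinit_demo(degree=8):
    A = np.array([[0., 1.], [-1., 0.]])       # u1' = u2, u2' = -u1
    mesh = np.linspace(0., np.pi / 2, 5)      # 4 subintervals
    Proj_L = np.array([[1., 0.]])             # selects y at the left end
    B_L = np.array([0.])                      # y(0) = 0
    Proj_R = np.array([[1., 0.]])             # selects y at the right end
    B_R = np.array([1.])                      # y(pi/2) = 1
    return bvpinit(A, mesh, degree, Proj_L, B_L, Proj_R, B_R)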
class Struct(dict):
"""
Struct inherits from dict and adds this functionality:
Instead of accessing the keys of struct by typing
struct['key'], one may instead type struct.key.
These two options will do exactly the same thing. A new
Struct object can also be created with a dict as an input
parameter, and the resulting Struct object will have the
same data members as the dict passed to it.
"""
def __init__(self,inpt={}):
super(Struct,self).__init__(inpt)
def __getattr__(self, name):
return self.__getitem__(name)
def __setattr__(self,name,value):
self.__setitem__(name,value)
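# Hedged illustration of the dual access styles described in the docstring
# above (the field names are assumptions for demonstration only):
def _struct_demo():
    s = Struct({'degree': 3})
    s.tol = 1e-8                      # attribute-style write
    return s.degree == s['degree'] and s['tol'] == s.tol   # both styles agree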
class cheby_bvp(Struct):
"""
The cheby_bvp class inherits from Struct. When a user calls bvpinit, a
cheby_bvp object is returned which will have the methods below, as well as
the data fields in bvpinit, as its attributes.
"""
def __init__(self,startStruct):
super(cheby_bvp,self).__init__(startStruct)
def solve(self, max_err=None, xSize=25):
"""
solve takes a cheby_bvp object and solves the boundary value problem
that is contained in it.
"""
#d = dict(d) # copies d so that init could be reused if wanted; should it be copied?
sys_dim = len(self['A'](self['a'][0]))
num_intervals = len(self['a'])
# total number of nodes we solve for; iterates through each interval
        equ_dim = 0
# interval specific objects
for j in range(num_intervals):
# degree of polynomial
degreeRange = np.array(range(self['N'][j]))
equ_dim = equ_dim + self['N'][j]
# Chebyshev nodes
self['theta'][j] = (degreeRange + 0.5)*np.pi/self['N'][j]
# nodes in [-1,1]
self['nodes_0'][j] = np.cos(self['theta'][j])
# nodes in [a,b]
self['nodes'][j] = 0.5*(self['a'][j]+self['b'][j])+0.5*(self['a'][j]-self['b'][j])*self['nodes_0'][j]
# Transformation to get Chebyshev coefficients
Id2 = (2/self['N'][j])*np.eye(self['N'][j])
Id2[0,0] = Id2[0,0]/2
self['Tcf'][j] = Id2.dot(np.cos(np.outer(self['theta'][j], degreeRange)).T)
# Chebyshev polynomials at the end points
self['Ta'][j] = np.cos(np.zeros((1,self['N'][j])))
self['Tb'][j] = np.cos(np.pi*degreeRange)
# Derivative of Chebyshev polynomials at end points
self['Ta_x'][j] = | np.square(degreeRange) | numpy.square |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Imaging
improve:
reinit, uncert,
rand_norm, rand_splitnorm, rand_pointing,
slice, slice_inv_sq, crop, rebin, groupixel
smooth, artifact, mask
Jy_per_pix_to_MJy_per_sr(improve):
header, image, wave
iuncert(improve):
unc
islice(improve):
image, wave, filenames, clean
icrop(improve):
header, image, wave
irebin(improve):
header, image, wave
igroupixel(improve):
header, image, wave
ismooth(improve):
header, image, wave
imontage(improve):
reproject, reproject_mc, coadd, clean
iswarp(improve):
footprint, combine, combine_mc, clean
iconvolve(improve):
spitzer_irs, choker, do_conv, image, wave,
filenames, clean
cupid(improve):
spec_build, sav_build,
header, image, wave
wmask, wclean, interfill, hextract, hswarp,
concatenate
"""
from tqdm import tqdm, trange
import os
import math
import numpy as np
from scipy.io import readsav
from scipy.interpolate import interp1d
from astropy import wcs
from astropy.io import ascii
from astropy.table import Table
from reproject import reproject_interp, reproject_exact, reproject_adaptive
from reproject.mosaicking import reproject_and_coadd
import subprocess as SP
import warnings
# warnings.filterwarnings("ignore", category=RuntimeWarning)
# warnings.filterwarnings("ignore", message="Skipping SYSTEM_VARIABLE record")
## Local
from utilities import InputError
from inout import (fitsext, csvext, ascext, fclean,
read_fits, write_fits, savext, write_hdf5,
# read_csv, write_csv, read_ascii,
)
from arrays import listize, closest, pix2sup, sup2pix
from maths import nanavg, bsplinterpol
from astrom import fixwcs, get_pc, pix2sr
##-----------------------------------------------
##
## <improve> based tools
##
##-----------------------------------------------
class improve:
'''
IMage PROcessing VEssel
'''
def __init__(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
self: filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
if self.im is not None:
self.Ndim = self.im.ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
if self.hdr is not None:
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Raw size (pix): {} * {}'.format(self.Nx, self.Ny))
def reinit(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
Update init variables
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
self.Ndim = self.im.ndim
self.hdr['NAXIS'] = self.Ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
            ## Nw=1 patch
            if self.im.shape[0]==1:
                self.Ndim = 2
                self.hdr['NAXIS'] = self.Ndim # keep NAXIS coherent with NAXIS3 removal
                del self.hdr['NAXIS3']
else:
self.hdr['NAXIS3'] = self.Nw
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
self.hdr['NAXIS2'] = self.Ny
self.hdr['NAXIS1'] = self.Nx
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Image size (pix): {} * {}'.format(self.Nx, self.Ny))
def uncert(self, filOUT=None, filUNC=None, filWGT=None, wfac=1.,
BG_image=None, BG_weight=None, zerovalue=np.nan):
'''
Estimate uncertainties from the background map
        The resulting error map is uniform or weighted
------ INPUT ------
filOUT output uncertainty map (FITS)
filUNC input uncertainty map (FITS)
filWGT input weight map (FITS)
wfac multiplication factor for filWGT (Default: 1)
BG_image background image array used to generate unc map
BG_weight background weight array
zerovalue value used to replace zero value (Default: NaN)
------ OUTPUT ------
unc estimated unc map
'''
if filUNC is not None:
unc = read_fits(filUNC).data
else:
if BG_image is not None:
im = BG_image
Ny, Nx = BG_image.shape
else:
im = self.im
Ny = self.Ny
Nx = self.Nx
Nw = self.Nw
## sigma: std dev of (weighted) flux distribution of bg region
if BG_weight is not None:
if self.Ndim==3:
sigma = np.nanstd(im * BG_weight, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im * BG_weight)
else:
if self.Ndim==3:
sigma = np.nanstd(im, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im)
## wgt: weight map
if filWGT is not None:
wgt = read_fits(filWGT).data * wfac
else:
wgt = np.ones(self.im.shape) * wfac
## unc: weighted rms = root of var/wgt
if self.Ndim==3:
unc = []
for w in range(Nw):
                unc.append(np.sqrt(1./wgt[w,:,:]) * sigma[w])
unc = np.array(unc)
elif self.Ndim==2:
unc = np.sqrt(1./wgt) * sigma
## Replace zero values
unc[unc==0] = zerovalue
self.unc = unc
if filOUT is not None:
write_fits(filOUT, self.hdr, unc, self.wvl, self.wmod)
return unc
def rand_norm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random N(0,1) noise
'''
if filUNC is not None:
unc = read_fits(filUNC).data
if unc is not None:
## unc should have the same dimension with im
theta = np.random.normal(mu, sigma, self.im.shape)
self.im += theta * unc
return self.im
def rand_splitnorm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random SN(0,lam,lam*tau) noise
------ INPUT ------
filUNC 2 FITS files for unc of left & right sides
unc 2 uncertainty ndarrays
------ OUTPUT ------
'''
if filUNC is not None:
unc = []
for f in filUNC:
unc.append(read_fits(f).data)
if unc is not None:
## unc[i] should have the same dimension with self.im
tau = unc[1]/unc[0]
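            # peak = P(a draw falls on the left, unc[0], side of the split-normal)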
peak = 1/(1+tau)
theta = np.random.normal(mu, sigma, self.im.shape) # ~N(0,1)
flag = np.random.random(self.im.shape) # ~U(0,1)
if self.Ndim==2:
for x in range(self.Nx):
for y in range(self.Ny):
if flag[y,x]<peak[y,x]:
self.im[y,x] += -abs(theta[y,x]) * unc[0][y,x]
else:
self.im[y,x] += abs(theta[y,x]) * unc[1][y,x]
elif self.Ndim==3:
for x in range(self.Nx):
for y in range(self.Ny):
for k in range(self.Nw):
if flag[k,y,x]<peak[k,y,x]:
self.im[k,y,x] += -abs(
theta[k,y,x]) * unc[0][k,y,x]
else:
self.im[k,y,x] += abs(
theta[k,y,x]) * unc[1][k,y,x]
return self.im
def rand_pointing(self, sigma=0, header=None, fill='med',
xscale=1, yscale=1, swarp=False, tmpdir=None):
'''
Add pointing uncertainty to WCS
------ INPUT ------
sigma pointing accuracy (arcsec)
header baseline
fill fill value of no data regions after shift
'med': axis median (default)
'avg': axis average
'near': nearest non-NaN value on the same axis
float: constant
xscale,yscale regrouped super pixel size
swarp use SWarp to perform position shifts
Default: False (not support supix)
------ OUTPUT ------
'''
if sigma>=0:
sigma /= 3600.
            d_ro = abs(np.random.normal(0., sigma)) # half-normal |N(0,sigma)|
d_phi = np.random.random() *2. * np.pi # U(0,2*pi)
# d_ro, d_phi = 0.0002, 4.5
# print('d_ro,d_phi = ', d_ro,d_phi)
## New header/WCS
if header is None:
header = self.hdr
wcs = fixwcs(header=header, mode='red_dim').wcs
Nx = header['NAXIS1']
Ny = header['NAXIS2']
newheader = header.copy()
newheader['CRVAL1'] += d_ro * np.cos(d_phi)
newheader['CRVAL2'] += d_ro * np.sin(d_phi)
newcs = fixwcs(header=newheader, mode='red_dim').wcs
## Convert world increment to pix increment
pix = wcs.all_world2pix(newheader['CRVAL1'], newheader['CRVAL2'], 1)
d_x = pix[0] - header['CRPIX1']
d_y = pix[1] - header['CRPIX2']
# print('Near CRPIXn increments: ', d_x, d_y)
# val1 = np.array(newcs.all_pix2world(0.5, 0.5, 1))
# d_x, d_y = wcs.all_world2pix(val1[np.newaxis,:], 1)[0] - 0.5
# print('Near (1,1) increments: ', d_x, d_y)
oldimage = self.im
## Resampling
if swarp:
## Set path of tmp files (SWarp use only)
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_swp/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
## Works but can be risky since iswarp.combine included rand_pointing...
write_fits(path_tmp+'tmp_rand_shift',
newheader, self.im, self.wvl)
swp = iswarp(refheader=self.hdr, tmpdir=path_tmp)
rep = swp.combine(path_tmp+'tmp_rand_shift',
combtype='avg', keepedge=True)
self.im = rep.data
else:
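                ## Manual resampling: split the pixel shift into an integer part
                ## f2 and a fractional part frac = frac2 - f2, then spread each
                ## super-pixel over its two shifted neighbours with the
                ## complementary weights (1 - frac) and frac, so they sum to 1.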
if self.Ndim==3:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((self.Nw,Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=2)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=2)
elif fill=='near':
fill_value = np.nanmean(self.im[:,:,x0[0]:x0[-1]+1],axis=2)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((self.Nw,Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=1)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=1)
elif fill=='near':
fill_value = np.nanmean(cube_supx[:,y0[0]:y0[-1]+1,:],axis=1)
else:
fill_value = fill
if frac2>=0:
if ys>=f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
if ys>f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (frac2-f2) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
else:
if ys<=Nys+f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
if ys<Nys+f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]-1:y1[-1],:],axis=1)
else:
supcube[:,ys,:] += (f2+frac1) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
for x in range(Nx):
for y in range(Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
self.im[:,y,x] = supcube[:,ys,xs]
elif self.Ndim==2:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=1)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=1)
elif fill=='near':
fill_value = np.nanmean(self.im[:,x0[0]:x0[-1]+1],axis=1)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
else:
cube_supx[:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
else:
cube_supx[:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=0)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=0)
elif fill=='near':
fill_value = np.nanmean(cube_supx[y0[0]:y0[-1]+1,:],axis=0)
else:
fill_value = fill
if frac2>=0:
if ys>=f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (f2+frac1) * np.nanmean(cube_supx[y1[0]:y1[-1]+1,:],axis=0)
if ys>f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (frac2-f2) * np.nanmean(cube_supx[y2[0]:y2[-1]+1,:],axis=0)
else:
supcube[ys,:] += (frac2-f2) * fill_value
else:
supcube[ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
else:
if ys<=Nys+f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (frac2-f2) * np.nanmean(cube_supx[y2[0]:y2[-1]+1,:],axis=0)
if ys<Nys+f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (f2+frac1) * np.nanmean(cube_supx[y1[0]-1:y1[-1],:],axis=0)
else:
supcube[ys,:] += (f2+frac1) * fill_value
else:
supcube[ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
for x in range(Nx):
for y in range(Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
self.im[y,x] = supcube[ys,xs]
## Original NaN mask
mask_nan = np.isnan(oldimage)
self.im[mask_nan] = np.nan
## Recover new NaN pixels with zeros
mask_recover = np.logical_and(np.isnan(self.im), ~mask_nan)
self.im[mask_recover] = 0
return self.im
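    ## Hedged usage sketch (added for illustration; the filename and the
    ## constructor call are placeholders, not part of this module's tests):
    ##
    ##   im = improve('input_cube.fits', verbose=True)
    ##   shifted = im.rand_pointing(sigma=1.2, fill='med', xscale=2, yscale=2)
    ##
    ## sigma is the 1-sigma pointing accuracy in arcsec; the offset is drawn
    ## once per call, so Monte-Carlo error propagation repeats the call.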
def slice(self, filSL, postfix='', ext=''):
## 3D cube slicing
slist = []
if self.Ndim==3:
# hdr = self.hdr.copy()
# for kw in self.hdr.keys():
# if '3' in kw:
# del hdr[kw]
# hdr['NAXIS'] = 2
for k in range(self.Nw):
## output filename list
f = filSL+'_'+'0'*(4-len(str(k)))+str(k)+postfix
slist.append(f+ext)
write_fits(f, self.hdred, self.im[k,:,:]) # gauss_noise inclu
elif self.Ndim==2:
f = filSL+'_0000'+postfix
slist.append(f+ext)
write_fits(f, self.hdred, self.im) # gauss_noise inclu
if self.verbose==True:
print('Input file is a 2D image which cannot be sliced! ')
print('Rewritten with only random noise added (if provided).')
return slist
def slice_inv_sq(self, filSL, postfix=''):
## Inversed square cube slicing
inv_sq = 1./self.im**2
slist = []
if self.Ndim==3:
# hdr = self.hdr.copy()
# for kw in self.hdr.keys():
# if '3' in kw:
# del hdr[kw]
# hdr['NAXIS'] = 2
for k in range(self.Nw):
## output filename list
f = filSL+'_'+'0'*(4-len(str(k)))+str(k)+postfix
slist.append(f)
write_fits(f, self.hdred, inv_sq[k,:,:]) # gauss_noise inclu
elif self.Ndim==2:
f = filSL+'_0000'+postfix
slist.append(f)
write_fits(f, self.hdred, inv_sq) # gauss_noise inclu
return slist
def crop(self, filOUT=None,
sizpix=None, cenpix=None, sizval=None, cenval=None):
'''
        If both pix and val inputs are given, pix takes precedence.
------ INPUT ------
filOUT output file
sizpix crop size in pix (dx, dy)
cenpix crop center in pix (x, y)
sizval crop size in deg (dRA, dDEC) -> (dx, dy)
cenval crop center in deg (RA, DEC) -> (x, y)
------ OUTPUT ------
self.im cropped image array
'''
oldimage = self.im
hdr = self.hdr
## Crop center
##-------------
if cenpix is None:
if cenval is None:
raise ValueError('Crop center unavailable! ')
else:
## Convert coord
try:
cenpix = np.array(self.w.all_world2pix(cenval[0], cenval[1], 1))
except wcs.wcs.NoConvergence as e:
cenpix = e.best_solution
print("Best solution:\n{0}".format(e.best_solution))
print("Achieved accuracy:\n{0}".format(e.accuracy))
print("Number of iterations:\n{0}".format(e.niter))
else:
cenval = self.w.all_pix2world(np.array([cenpix]), 1)[0]
if not (0<cenpix[0]-0.5<self.Nx and 0<cenpix[1]-0.5<self.Ny):
            raise ValueError('Crop centre is outside the image border! ')
## Crop size
##-----------
if sizpix is None:
if sizval is None:
raise ValueError('Crop size unavailable! ')
else:
## CDELTn needed (Physical increment at the reference pixel)
sizpix = np.array(sizval) / abs(self.cdelt)
sizpix = np.array([math.floor(n) for n in sizpix])
else:
sizval = np.array(sizpix) * abs(self.cdelt)
if self.verbose==True:
print('----------')
print("Crop centre (RA, DEC): [{:.8}, {:.8}]".format(*cenval))
print("Crop size (dRA, dDEC): [{}, {}]\n".format(*sizval))
print("Crop centre (x, y): [{}, {}]".format(*cenpix))
print("Crop size (dx, dy): [{}, {}]".format(*sizpix))
print('----------')
## Lowerleft origin
##------------------
xmin = math.floor(cenpix[0] - sizpix[0]/2.)
ymin = math.floor(cenpix[1] - sizpix[1]/2.)
xmax = xmin + sizpix[0]
ymax = ymin + sizpix[1]
if not (xmin>=0 and xmax<=self.Nx and ymin>=0 and ymax<=self.Ny):
            raise ValueError('Crop region extends beyond the image border! ')
## OUTPUTS
##---------
## New image
if self.Ndim==3:
newimage = oldimage[:, ymin:ymax, xmin:xmax] # gauss_noise inclu
## recover 3D non-reduced header
# hdr = read_fits(self.filIN).header
elif self.Ndim==2:
newimage = oldimage[ymin:ymax, xmin:xmax] # gauss_noise inclu
## Modify header
##---------------
hdr['CRPIX1'] = math.floor(sizpix[0]/2. + 0.5)
hdr['CRPIX2'] = math.floor(sizpix[1]/2. + 0.5)
hdr['CRVAL1'] = cenval[0]
hdr['CRVAL2'] = cenval[1]
self.hdr = hdr
self.im = newimage
## Write cropped image/cube
if filOUT is not None:
# comment = "[ICROP]ped at centre: [{:.8}, {:.8}]. ".format(*cenval)
# comment = "with size [{}, {}] (pix).".format(*sizpix)
write_fits(filOUT, self.hdr, self.im, self.wvl, self.wmod)
## Update self variables
self.reinit(header=self.hdr, image=self.im, wave=self.wvl,
wmod=self.wmod, verbose=self.verbose)
return self.im
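    ## Hedged usage sketch for crop() (added; the values are placeholders):
    ##
    ##   im.crop(filOUT='cropped', sizpix=(50, 40), cenval=(83.81, -5.39))
    ##
    ## Per the docstring above, pixel inputs win over world-coordinate
    ## inputs when both are supplied.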
def rebin(self, pixscale=None, total=False, extrapol=False, filOUT=None):
'''
Shrinking (box averaging) or expanding (bilinear interpolation) astro images
        The new and old images are aligned at the zero point.
[REF] IDL lib frebin/hrebin
https://idlastro.gsfc.nasa.gov/ftp/pro/astrom/hrebin.pro
https://github.com/wlandsman/IDLAstro/blob/master/pro/frebin.pro
------ INPUT ------
pixscale output pixel scale in arcsec/pixel
scalar - square pixel
                     tuple - one scale per image axis (x, y)
total Default: False
True - sum the non-NaN pixels
False - mean
extrapol Default: False
True - value weighted by non NaN fractions
False - NaN if any fraction is NaN
filOUT output file
------ OUTPUT ------
newimage rebinned image array
'''
oldimage = self.im
hdr = self.hdr
oldheader = hdr.copy()
oldw = self.w
# cd = w.pixel_scale_matrix
oldcd = self.cd
oldcdelt = self.cdelt
oldNx = self.Nx
oldNy = self.Ny
if pixscale is not None:
pixscale = listize(pixscale)
if len(pixscale)==1:
pixscale.extend(pixscale)
else:
                warnings.warn('Non-square pixels are displayed as square in DS9, '
                              'and the WCS may be inconsistent.')
## convert arcsec to degree
cdelt = np.array(pixscale) / 3600.
## Expansion (>1) or contraction (<1) in X/Y
xratio = cdelt[0] / abs(oldcdelt[0])
yratio = cdelt[1] / abs(oldcdelt[1])
else:
pixscale = listize(abs(oldcdelt) * 3600.)
xratio = 1.
yratio = 1.
if self.verbose==True:
print('----------')
print('The actual map size is {} * {}'.format(self.Nx, self.Ny))
print('The actual pixel scale is {} * {} arcsec'.format(*pixscale))
print('----------')
raise InputError('<improve.rebin>',
'No pixscale, nothing has been done!')
## Modify header
##---------------
## Fix CRVALn
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
hdr['CRPIX1'] = (crpix1 - 0.5) / xratio + 0.5
hdr['CRPIX2'] = (crpix2 - 0.5) / yratio + 0.5
cd = oldcd * [xratio,yratio]
hdr['CD1_1'] = cd[0][0]
hdr['CD2_1'] = cd[1][0]
hdr['CD1_2'] = cd[0][1]
hdr['CD2_2'] = cd[1][1]
for kw in oldheader.keys():
if 'PC' in kw:
del hdr[kw]
if 'CDELT' in kw:
del hdr[kw]
# lam = yratio/xratio
# pix_ratio = xratio*yratio
Nx = math.ceil(oldNx / xratio)
Ny = math.ceil(oldNy / yratio)
# Nx = int(oldNx/xratio + 0.5)
# Ny = int(oldNy/yratio + 0.5)
## Rebin
##-------
'''
## Ref: poppy(v0.3.4).utils.krebin
## Klaus P's fastrebin from web
sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
return a.reshape(sh).sum(-1).sum(1)
'''
if self.Ndim==3:
            image_newx = np.zeros((self.Nw,oldNy,Nx))
#!/usr/bin/python
# **************************
# * Author : baiyyang
# * Email : <EMAIL>
# * Description :
# * create time : 2018/3/13下午5:05
# * file name : data_helpers.py
import numpy as np
import re
import jieba
import string
from zhon.hanzi import punctuation
import collections
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
def clean_str(s):
"""
    Tokenization/string cleaning for all datasets except for SST.
:param s:
:return:
"""
s = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", s)
s = re.sub(r"\'s", " \'s", s)
s = re.sub(r"\'ve", " \'ve", s)
s = re.sub(r"n\'t", " n\'t", s)
s = re.sub(r"\'re", " \'re", s)
s = re.sub(r"\'d", " \'d", s)
s = re.sub(r"\'ll", " \'ll", s)
s = re.sub(r",", " , ", s)
s = re.sub(r"!", " ! ", s)
s = re.sub(r"\(", " \( ", s)
s = re.sub(r"\)", " \) ", s)
s = re.sub(r"\?", " \? ", s)
s = re.sub(r"\s{2,}", " ", s)
return s.strip().lower()
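def _clean_str_example():
    """
    Minimal self-check of clean_str (added for illustration; not part of the
    original module). Verifies the contraction and punctuation handling.
    """
    assert clean_str("I can't believe it's raining!") == \
        "i ca n't believe it 's raining !"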
def load_data_and_labels(positive_data_file, negative_data_file):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Return split sentences and labels.
:param positive_data_file:
:param negative_data_file:
:return:
"""
# Load data from files
positive_examples = list(open(positive_data_file, 'r', encoding='utf-8').readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file, 'r', encoding='utf-8').readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
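def _demo_load_polarity(positive_data_file, negative_data_file):
    """
    Hedged demo (added for illustration): shows the shapes returned by
    load_data_and_labels. The two paths must point at existing text files,
    one example per line.
    """
    x_text, y = load_data_and_labels(positive_data_file, negative_data_file)
    assert len(x_text) == y.shape[0]
    assert y.shape[1] == 2  # one-hot: [0, 1] = positive, [1, 0] = negative
    return x_text, y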
def load_data_and_labels_chinese(train_data_file, test_data_file):
"""
    Load the Chinese medical disease classification dataset.
:param train_data_file:
:param test_data_file:
:return:
"""
words = []
contents = []
train_datas = []
test_datas = []
test_labels = []
labels = []
    # Build the training set
with open(train_data_file, 'r', encoding='utf-8') as f:
for line in f:
data, label = line.strip().split('\t')
labels.append(label)
            # Word segmentation
segments = [seg for seg in jieba.cut(data, cut_all=False)]
segments_ = [seg.strip() for seg in segments if seg not in punctuation
and seg not in string.punctuation]
contents.append([seg_ for seg_ in segments_ if seg_ != ''])
words.extend(segments_)
words = [word for word in words if word != '']
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(9999))
word2id = {}
for word, _ in count:
word2id[word] = len(word2id)
# id2word = dict(zip(word2id.values(), word2id.keys()))
print('dictionary_size:', len(word2id))
sentence_max_length = max([len(content) for content in contents])
print('sentence_max_length:', sentence_max_length)
for content in contents:
train_data = [word2id[word] if word in word2id.keys() else word2id['UNK'] for word in content]
train_data.extend([0] * (sentence_max_length - len(train_data)))
train_datas.append(train_data)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(np.array(labels))
onehot_encoder = OneHotEncoder(sparse=False)
train_labels = onehot_encoder.fit_transform(integer_encoded.reshape(len(integer_encoded), 1))
print(train_labels.shape)
    # Build the test set
labels = []
contents = []
with open(test_data_file, 'r', encoding='utf-8') as f:
for line in f:
data, label = line.strip().split('\t')
labels.append(label)
            # Word segmentation
segments = [segment for segment in jieba.cut(data, cut_all=False)]
segments_ = [segment.strip() for segment in segments if segment not in punctuation
and segment not in string.punctuation]
contents.append([seg_ for seg_ in segments_ if seg_ != ''])
for content in contents:
test_data = [word2id[word] if word in word2id.keys() else word2id['UNK'] for word in content]
if sentence_max_length > len(test_data):
test_data.extend([0] * (sentence_max_length - len(test_data)))
else:
test_data = test_data[:sentence_max_length]
test_datas.append(test_data)
    integer_encoded = label_encoder.fit_transform(np.array(labels))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME>
# Created : 10/24/2017
# Last Modified: 04/06/2018
# Vanderbilt University
from __future__ import print_function, division, absolute_import
__author__ =['<NAME>']
__copyright__ =["Copyright 2017 <NAME>, "]
__email__ =['<EMAIL>']
__maintainer__ =['<NAME>']
"""
Computes the 1-halo `Quenched` fractions for SDSS DR7
"""
# Path to Custom Utilities folder
import os
import sys
import git
# Importing Modules
from cosmo_utils import mock_catalogues as cm
from cosmo_utils import utils as cu
from cosmo_utils.utils import file_utils as cfutils
from cosmo_utils.utils import file_readers as cfreaders
from cosmo_utils.utils import work_paths as cwpaths
from cosmo_utils.utils import stats_funcs as cstats
from cosmo_utils.mock_catalogues import catls_utils as cmcu
import numpy as num
import math
import pandas as pd
import pickle
#sns.set()
from progressbar import (Bar, ETA, FileTransferSpeed, Percentage, ProgressBar,
ReverseBar, RotatingMarker)
# Extra-modules
import argparse
from argparse import ArgumentParser
from argparse import HelpFormatter
from operator import attrgetter
import copy
from datetime import datetime
from multiprocessing import Pool, Process, cpu_count
## Functions
## --------- PARSING ARGUMENTS ------------##
class SortingHelpFormatter(HelpFormatter):
def add_arguments(self, actions):
"""
Modifier for `argparse` help parameters, that sorts them alphabetically
"""
actions = sorted(actions, key=attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
def _str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def _check_pos_val(val, val_min=0):
"""
Checks if value is larger than `val_min`
Parameters
----------
val: int or float
value to be evaluated by `val_min`
val_min: float or int, optional (default = 0)
minimum value that `val` can be
Returns
-------
ival: float
value if `val` is larger than `val_min`
Raises
-------
ArgumentTypeError: Raised if `val` is NOT larger than `val_min`
"""
ival = float(val)
if ival <= val_min:
msg = '`{0}` is an invalid input!'.format(ival)
        msg += ' `val` must be larger than `{0}`!!'.format(val_min)
raise argparse.ArgumentTypeError(msg)
return ival
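## Illustrative note (added): `_check_pos_val` is meant to be passed as the
## `type=` callable of an argparse argument (see `-mg` below), so invalid
## values are rejected at parse time, e.g.
##
##   parser.add_argument('-mg', type=_check_pos_val)   # '-mg 0' -> error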
def get_parser():
"""
Get parser object for `eco_mocks_create.py` script.
Returns
-------
args:
input arguments to the script
"""
## Define parser object
description_msg = 'Script to evaluate 1-halo conformity quenched fractions \
on SDSS DR7'
parser = ArgumentParser(description=description_msg,
formatter_class=SortingHelpFormatter,)
#
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
## Number of HOD's to create. Dictates how many different types of
## mock catalogues to create
parser.add_argument('-hod_model_n',
dest='hod_n',
help="Number of distinct HOD model to use. Default = 0",
type=int,
choices=range(0,1),
metavar='[0]',
default=0)
## Type of dark matter halo to use in the simulation
parser.add_argument('-halotype',
dest='halotype',
help='Type of the DM halo.',
type=str,
choices=['so','fof'],
default='fof')
## CLF/CSMF method of assigning galaxy properties
parser.add_argument('-clf_method',
dest='clf_method',
help="""
Method for assigning galaxy properties to mock
galaxies. Options:
(1) = Independent assignment of (g-r), sersic, logssfr
(2) = (g-r) decides active/passive designation and
draws values independently.
(3) (g-r) decides active/passive designation, and
assigns other galaxy properties for that given
galaxy.
""",
type=int,
choices=[1,2,3],
default=3)
## SDSS Sample
parser.add_argument('-sample',
dest='sample',
help='SDSS Luminosity sample to analyze',
type=int,
choices=[19,20,21],
default=19)
## SDSS Kind
parser.add_argument('-kind',
dest='catl_kind',
help='Type of data being analyzed.',
type=str,
choices=['data','mocks'],
default='data')
## SDSS Type
parser.add_argument('-abopt',
dest='catl_type',
help='Type of Abund. Matching used in catalogue',
type=str,
choices=['mr'],
default='mr')
## Total number of Iterations
parser.add_argument('-itern',
dest='itern_tot',
help='Total number of iterations to perform on the `shuffled` scenario',
type=int,
choices=range(10,10000),
metavar='[10-10000]',
default=1000)
## Minimum Number of Galaxies in 1 group
parser.add_argument('-nmin',
dest='ngals_min',
help='Minimum number of galaxies in a galaxy group',
type=int,
default=2)
## Bin in Group mass
parser.add_argument('-mg',
dest='Mg_bin',
help='Bin width for the group masses',
type=_check_pos_val,
default=0.4)
## Logarithm of the galaxy property
parser.add_argument('-log',
dest='prop_log',
help='Use `log` or `non-log` units for `M*` and `sSFR`',
type=str,
choices=['log', 'nonlog'],
default='log')
## Mock Start
parser.add_argument('-catl_start',
dest='catl_start',
help='Starting index of mock catalogues to use',
type=int,
choices=range(101),
metavar='[0-100]',
default=0)
## Mock Finish
parser.add_argument('-catl_finish',
dest='catl_finish',
help='Finishing index of mock catalogues to use',
type=int,
choices=range(101),
metavar='[0-100]',
default=100)
## `Perfect Catalogue` Option
parser.add_argument('-perf',
dest='perf_opt',
help='Option for using a `Perfect` catalogue',
type=_str2bool,
default=False)
## Type of correlation function to perform
parser.add_argument('-corrtype',
dest='corr_type',
help='Type of correlation function to perform',
type=str,
choices=['galgal'],
default='galgal')
## Shuffling Marks
parser.add_argument('-shuffle',
dest='shuffle_marks',
help='Option for shuffling marks of Cens. and Sats.',
choices=['cen_sh', 'sat_sh', 'censat_sh'],
default='censat_sh')
## Option for removing file
parser.add_argument('-remove',
dest='remove_files',
help='Delete pickle file containing pair counts',
type=_str2bool,
default=False)
## Type of error estimation
parser.add_argument('-sigma',
dest='type_sigma',
help='Type of error to use. Percentiles or St. Dev.',
type=str,
choices=['std','perc'],
default='std')
## Statistics for evaluating conformity
parser.add_argument('-frac_stat',
dest='frac_stat',
help='Statistics to use to evaluate the conformity signal',
type=str,
choices=['diff', 'ratio'],
default='diff')
## CPU Counts
parser.add_argument('-cpu',
dest='cpu_frac',
help='Fraction of total number of CPUs to use',
type=float,
default=0.75)
## Show Progbar
parser.add_argument('-prog',
dest='prog_bar',
help='Option to print out progress bars for each for loop',
type=_str2bool,
default=True)
## Program message
parser.add_argument('-progmsg',
dest='Prog_msg',
                        help='Program message to use throughout the script',
type=str,
default=cfutils.Program_Msg(__file__))
## Random Seed
parser.add_argument('-seed',
dest='seed',
help='Random seed to be used for the analysis',
type=int,
metavar='[0-4294967295]',
default=1)
## Random Seed for CLF
parser.add_argument('-clf_seed',
dest='clf_seed',
help='Random seed to be used for CLF',
type=int,
metavar='[0-4294967295]',
default=0)
## Parsing Objects
args = parser.parse_args()
return args
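## Hedged command-line sketch (added; the script filename is a placeholder,
## the option names are those defined in get_parser above):
##
##   python one_halo_fracs_sdss.py -kind data -sample 19 -itern 1000 \
##       -frac_stat diff -shuffle censat_sh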
def add_to_dict(param_dict):
"""
Aggregates extra variables to dictionary
Parameters
----------
param_dict: python dictionary
dictionary with input parameters and values
Returns
----------
param_dict: python dictionary
dictionary with old and new values added
"""
### Sample - Int
sample_s = str(param_dict['sample'])
### Sample - Mr
sample_Mr = 'Mr{0}'.format(param_dict['sample'])
### Perfect Catalogue
if param_dict['perf_opt']:
perf_str = 'haloperf'
else:
perf_str = ''
### Figure
fig_idx = 24
### Survey Details
sample_title = r'\boldmath$M_{r}< -%d$' %(param_dict['sample'])
## Project Details
# String for Main directories
param_str_arr = [ param_dict['catl_kind'] , param_dict['catl_type'] ,
param_dict['sample'] , param_dict['prop_log'] ,
param_dict['Mg_bin'] , param_dict['itern_tot'] ,
param_dict['ngals_min' ], param_dict['shuffle_marks'],
param_dict['type_sigma'], param_dict['frac_stat'] ,
perf_str ]
param_str_p = 'kind_{0}_type_{1}_sample_{2}_proplog_{3}_Mgbin_{4}_'
param_str_p += 'itern_{5}_nmin_{6}_sh_marks_{7}_type_sigma_{8}_'
param_str_p += 'fracstat_{9}_perf_str_{10}'
param_str = param_str_p.format(*param_str_arr)
###
### To dictionary
param_dict['sample_s' ] = sample_s
param_dict['sample_Mr' ] = sample_Mr
param_dict['perf_str' ] = perf_str
param_dict['fig_idx' ] = fig_idx
param_dict['sample_title' ] = sample_title
param_dict['param_str' ] = param_str
return param_dict
def param_vals_test(param_dict):
"""
Checks if values are consistent with each other.
Parameters
-----------
param_dict: python dictionary
dictionary with `project` variables
Raises
-----------
ValueError: Error
This function raises a `ValueError` error if one or more of the
required criteria are not met
"""
##
## Check the `perf_opt` for when `catl_kind` is 'data'
if (param_dict['catl_kind']=='data') and (param_dict['perf_opt']):
        msg = '{0} `perf_opt` ({2}) is not allowed when `catl_kind` == ({1})'\
.format(
param_dict['Prog_msg'],
param_dict['catl_kind'],
param_dict['perf_opt'])
raise ValueError(msg)
else:
pass
##
## Checking that `nmin` is larger than 2
if param_dict['ngals_min'] >= 2:
pass
else:
msg = '{0} `ngals_min` ({1}) must be larger than `2`'.format(
param_dict['Prog_msg'],
param_dict['ngals_min'])
raise ValueError(msg)
##
## Checking `cpu_frac` range
if (param_dict['cpu_frac'] > 0) and (param_dict['cpu_frac'] <= 1):
pass
else:
msg = '{0} `cpu_frac` ({1}) must be between (0,1]'.format(
param_dict['Prog_msg'],
param_dict['cpu_frac'])
raise ValueError(msg)
##
## Checking that `catl_start` < `catl_finish`
if param_dict['catl_start'] < param_dict['catl_finish']:
pass
else:
        msg = '{0} `catl_start` ({1}) must be smaller than `catl_finish` ({2})'\
.format(
param_dict['Prog_msg'],
param_dict['catl_start'],
param_dict['catl_finish'])
raise ValueError(msg)
def directory_skeleton(param_dict, proj_dict):
"""
Creates the directory skeleton for the current project
Parameters
----------
param_dict: python dictionary
dictionary with `project` variables
proj_dict: python dictionary
dictionary with info of the project that uses the
`Data Science` Cookiecutter template.
Returns
---------
proj_dict: python dictionary
Dictionary with current and new paths to project directories
"""
### MCF Folder prefix
if param_dict['catl_kind'] == 'data':
path_prefix = os.path.join( 'SDSS',
param_dict['catl_kind'],
param_dict['catl_type'],
param_dict['sample_Mr'],
'Frac_results')
elif param_dict['catl_kind'] == 'mocks':
path_prefix = os.path.join( 'SDSS',
param_dict['catl_kind'],
'halos_{0}'.format(param_dict['halotype']),
'hod_model_{0}'.format(param_dict['hod_n']),
'clf_seed_{0}'.format(param_dict['clf_seed']),
'clf_method_{0}'.format(param_dict['clf_method']),
param_dict['catl_type'],
param_dict['sample_Mr'],
'Frac_results')
### MCF Output directory - Results
pickdir = os.path.join( proj_dict['data_dir'],
'processed',
path_prefix,
'catl_pickle_files',
param_dict['corr_type'],
param_dict['param_str'])
# Creating Folders
cfutils.Path_Folder(pickdir)
## Adding to `proj_dict`
proj_dict['pickdir'] = pickdir
return proj_dict
def sigma_calcs(data_arr, type_sigma='std', perc_arr = [68., 95., 99.7],
return_mean_std=False):
"""
    Calculates the 1-, 2-, and 3-sigma ranges for `data_arr`
Parameters
-----------
data_arr: numpy.ndarray, shape( param_dict['nrpbins'], param_dict['itern_tot'])
array of values, from which to calculate percentiles or St. Dev.
type_sigma: string, optional (default = 'std')
option for calculating either `percentiles` or `standard deviations`
Options:
- 'perc': calculates percentiles
- 'std' : uses standard deviations as 1-, 2-, and 3-sigmas
perc_arr: array_like, optional (default = [68., 95., 99.7])
array of percentiles to calculate
return_mean_std: boolean, optional (default = False)
option for returning mean and St. Dev. along with `sigma_dict`
Return
----------
    sigma_dict: python dictionary
        dictionary containing the 1-, 2-, and 3-sigma upper and lower
        ranges for `data_arr`
mark_mean: array_like
array of the mean value of `data_arr`.
Only returned if `return_mean_std == True`
mark_std: array_like
array of the St. Dev. value of `data_arr`.
Only returned if `return_mean_std == True`
"""
## Creating dictionary for saving `sigma`s
sigma_dict = {}
for ii in range(len(perc_arr)):
sigma_dict[ii] = []
## Using Percentiles to estimate errors
if type_sigma=='perc':
for ii, perc_ii in enumerate(perc_arr):
mark_lower = num.nanpercentile(data_arr, 50.-(perc_ii/2.),axis=1)
mark_upper = num.nanpercentile(data_arr, 50.+(perc_ii/2.),axis=1)
# Saving to dictionary
sigma_dict[ii] = num.column_stack((mark_lower, mark_upper)).T
## Using standard deviations to estimate errors
if type_sigma=='std':
mean_val = num.nanmean(data_arr, axis=1)
std_val = num.nanstd( data_arr, axis=1)
for ii in range(len(perc_arr)):
mark_lower = mean_val - ((ii+1) * std_val)
mark_upper = mean_val + ((ii+1) * std_val)
# Saving to dictionary
sigma_dict[ii] = num.column_stack((mark_lower, mark_upper)).T
##
## Estimating mean and St. Dev. of `data_arr`
mark_mean = num.nanmean(data_arr, axis=1)
mark_std = num.nanstd (data_arr, axis=1)
if return_mean_std:
return sigma_dict, mark_mean, mark_std
else:
return sigma_dict
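def _sigma_calcs_example():
    """
    Minimal self-contained check of sigma_calcs (added for illustration).
    With Gaussian draws the 1-sigma band must bracket the per-bin mean.
    """
    rand = num.random.RandomState(0)
    data_arr = rand.normal(loc=1.0, scale=0.5, size=(5, 1000))
    sigma_dict, mark_mean, mark_std = sigma_calcs(data_arr, type_sigma='std',
                                                  return_mean_std=True)
    assert sigma_dict[0].shape == (2, 5)  # rows: (lower, upper) per bin
    assert num.all(sigma_dict[0][0] <= mark_mean)
    assert num.all(mark_mean <= sigma_dict[0][1])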
## --------- Analysis functions ------------##
def frac_prop_calc(df_bin_org, prop, param_dict, catl_keys_dict):
"""
Computes the quenched fractions of satellites in a given mass bin.
Parameters
----------
df_bin_org: pandas DataFrame
Dataframe for the selected group/halo mass bin
prop: string
galaxy property being evaluated
param_dict: python dictionary
dictionary with input parameters and values
catl_keys_dict: python dictionary
dictionary containing keys for the galaxy properties in catalogue
Returns
----------
"""
## Program message
Prog_msg = param_dict['Prog_msg']
## Constants
Cens = int(1)
Sats = int(0)
itern = param_dict['itern_tot']
## Catalogue Variables for galaxy properties
gm_key = catl_keys_dict['gm_key']
id_key = catl_keys_dict['id_key']
galtype_key = catl_keys_dict['galtype_key']
## Group statistics
groupid_unq = df_bin_org[id_key].unique()
ngroups = groupid_unq.shape[0]
##
## Selecting columns
df_bin_mod = df_bin_org.copy()[[galtype_key, id_key, prop]]
##
## Normalizing `prop` by the `prop_lim`
df_bin_mod.loc[:,prop] /= param_dict['prop_lim'][prop]
##
## Determining galaxy fractions for `df_bin_mod`
cens_pd_org = df_bin_mod.loc[(df_bin_mod[galtype_key]==Cens)].copy().reset_index()
sats_pd_org = df_bin_mod.loc[(df_bin_mod[galtype_key]==Sats)].copy().reset_index()
##
## Quench Satellite fraction
sat_quenched_frac = frac_stat_calc( cens_pd_org ,
sats_pd_org ,
prop ,
catl_keys_dict,
param_dict ,
frac_stat=param_dict['frac_stat'])
    ##
    ## Converting `sat_quenched_frac` to a numpy array
    sat_quenched_frac = num.asarray(sat_quenched_frac)
##
## Calculation fractions for Shuffles
sat_quenched_frac_sh = num.zeros((param_dict['itern_tot'],))
# ProgressBar properties
if param_dict['prog_bar']:
widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        pbar_mock = ProgressBar(widgets=widgets, maxval=itern).start()
## Iterating `itern` times and calculating quenched fractions
for ii in range(itern):
## Quenched fractions
sat_quenched_frac_sh[ii] = frac_stat_calc(cens_pd_org ,
sats_pd_org ,
prop ,
catl_keys_dict,
param_dict ,
shuffle=True ,
frac_stat=param_dict['frac_stat'])
if param_dict['prog_bar']:
pbar_mock.update(ii)
if param_dict['prog_bar']:
pbar_mock.finish()
return sat_quenched_frac, sat_quenched_frac_sh.T
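## Illustrative note (added): with `frac_stat='diff'` the conformity signal
## returned above is
##     P(sat = quenched | cen = quenched) - P(sat = quenched | cen = active),
## so a positive value means quenched satellites preferentially live around
## quenched centrals; 'ratio' divides the two probabilities instead.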
def frac_stat_calc(cens_pd_org, sats_pd_org, prop, catl_keys_dict, param_dict,
frac_stat='diff', shuffle=False):
"""
Computes quenched fractions of satellites for a given galaxy property
`prop` in a given mass bin.
Parameters
----------
cens_pd_org: pandas DataFrame
dataframe with only central galaxies in the given group mass bin.
Centrals belong to groups with galaxies >= `param_dict['ngals_min']`
sats_pd_org: pandas DataFrame
dataframe with only satellite galaxies in the given group mass bin.
Satellites belong to groups with galaxies >= `param_dict['ngals_min']`
prop: string
galaxy property being analyzed
catl_keys_dict: python dictionary
dictionary containing keys for the galaxy properties in catalogue
param_dict: python dictionary
dictionary with input parameters and values
frac_stat: string, optional (default = 'diff')
statistics to use to evaluate the conformity signal
Options:
- 'diff' : Takes the difference between P(sat=q|cen=q) and
P(sat=q|cen=act)
- 'ratio': Takes the ratio between P(sat=q|cen=q) and
P(sat=q|cen=act)
shuffle: boolean, optional (default = False)
option for shuffling the galaxies' properties among themselves, i.e.
centrals among centrals, and satellites among satellites.
Returns
-------
frac_sat_pas_cen_act: float
number of `passive` satellites around `active` centrals over the
        total number of satellites around `active` centrals
frac_sat_pas_cen_pas: float
number of `passive` satellites around `passive` centrals over the
        total number of satellites around `passive` centrals
frac_stat: float
'ratio' or 'difference' of between P(sat=q|cen=q) and P(sat=q|cen=act)
"""
## Keys for group/halo ID, mass, and galaxy type
gm_key = catl_keys_dict['gm_key']
id_key = catl_keys_dict['id_key']
galtype_key = catl_keys_dict['galtype_key']
## Copies of `cens_pd_org` and `sats_pd_org`
cens_pd = cens_pd_org.copy()
sats_pd = sats_pd_org.copy()
## Choosing if to shuffle `prop`
if shuffle:
## Choosing which kind of shuffle to use
# Shuffling only Centrals
if param_dict['shuffle_marks'] == 'cen_sh':
cens_prop_sh = cens_pd[prop].copy().values
num.random.shuffle(cens_prop_sh)
cens_pd.loc[:,prop] = cens_prop_sh
# Shuffling only Satellites
if param_dict['shuffle_marks'] == 'sat_sh':
sats_prop_sh = sats_pd[prop].copy().values
num.random.shuffle(sats_prop_sh)
sats_pd.loc[:,prop] = sats_prop_sh
# Shuffling both Centrals and Satellites
if param_dict['shuffle_marks'] == 'censat_sh':
# Centrals
cens_prop_sh = cens_pd[prop].copy().values
num.random.shuffle(cens_prop_sh)
cens_pd.loc[:,prop] = cens_prop_sh
# Satellites
sats_prop_sh = sats_pd[prop].copy().values
            num.random.shuffle(sats_prop_sh)
            sats_pd.loc[:,prop] = sats_prop_sh
"""
Synopsis: A binder for enabling this package using numpy arrays.
Author: <NAME> <<EMAIL>, <EMAIL>>
"""
from ctypes import cdll, POINTER, c_int, c_double, byref
import numpy as np
import ctypes
import pandas as pd
from numpy.ctypeslib import ndpointer
lib = cdll.LoadLibrary("./miniball_python.so")
def miniball(val):
"""
Computes the miniball.
input: val, a 2D numpy-array with points as rows, features as columns.
output: a dict containing:
- center: a 1D numpy-vector with the center of the miniball.
- radius: The radius.
- radius_squared. The radius squared.
"""
if isinstance(val, pd.DataFrame):
val = val.values
assert isinstance(val, np.ndarray)
if val.flags["C_CONTIGUOUS"] is False:
val = val.copy(order="C")
a = c_double(0)
b = c_double(0)
lib.miniball.argtypes = [
ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
c_int,
c_int,
POINTER(c_double),
POINTER(ctypes.c_double),
]
rows = int(val.shape[0])
cols = int(val.shape[1])
lib.miniball.restype = POINTER(ctypes.c_double * val.shape[1])
center = lib.miniball(val, rows, cols, byref(a), byref(b))
return {
"center": np.array([i for i in center.contents]),
"radius": a.value,
"radius_squared": b.value,
}
if __name__ == "__main__":
print(
        miniball(np.array([[3.0, 1.0], [3.0, 1.0], [1.0, 0.0]], dtype=np.double))
    )
"""
Defines classes with represent SPAM operations, along with supporting
functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.sparse as _sps
import collections as _collections
import numbers as _numbers
import itertools as _itertools
import functools as _functools
import copy as _copy
from .. import optimize as _opt
from ..tools import matrixtools as _mt
from ..tools import optools as _gt
from ..tools import basistools as _bt
from ..tools import listtools as _lt
from ..tools import slicetools as _slct
from ..tools import compattools as _compat
from ..tools import symplectic as _symp
from .basis import Basis as _Basis
from .protectedarray import ProtectedArray as _ProtectedArray
from . import modelmember as _modelmember
from . import term as _term
from . import stabilizer as _stabilizer
from .polynomial import Polynomial as _Polynomial
from . import replib
from .opcalc import bulk_eval_compact_polys_complex as _bulk_eval_compact_polys_complex
IMAG_TOL = 1e-8 # tolerance for imaginary part being considered zero
def optimize_spamvec(vecToOptimize, targetVec):
"""
Optimize the parameters of vecToOptimize so that the
the resulting SPAM vector is as close as possible to
targetVec.
This is trivial for the case of FullSPAMVec
instances, but for other types of parameterization
this involves an iterative optimization over all the
parameters of vecToOptimize.
Parameters
----------
vecToOptimize : SPAMVec
The vector to optimize. This object gets altered.
targetVec : SPAMVec
The SPAM vector used as the target.
Returns
-------
None
"""
#TODO: cleanup this code:
if isinstance(vecToOptimize, StaticSPAMVec):
return # nothing to optimize
if isinstance(vecToOptimize, FullSPAMVec):
if(targetVec.dim != vecToOptimize.dim): # special case: gates can have different overall dimension
vecToOptimize.dim = targetVec.dim # this is a HACK to allow model selection code to work correctly
vecToOptimize.set_value(targetVec) # just copy entire overall matrix since fully parameterized
return
assert(targetVec.dim == vecToOptimize.dim) # vectors must have the same overall dimension
targetVector = _np.asarray(targetVec)
def _objective_func(param_vec):
vecToOptimize.from_vector(param_vec)
return _mt.frobeniusnorm(vecToOptimize - targetVector)
x0 = vecToOptimize.to_vector()
minSol = _opt.minimize(_objective_func, x0, method='BFGS', maxiter=10000, maxfev=10000,
tol=1e-6, callback=None)
vecToOptimize.from_vector(minSol.x)
#print("DEBUG: optimized vector to min frobenius distance %g" % _mt.frobeniusnorm(vecToOptimize-targetVector))
def convert(spamvec, toType, basis, extra=None):
"""
Convert SPAM vector to a new type of parameterization, potentially
creating a new SPAMVec object. Raises ValueError for invalid conversions.
Parameters
----------
spamvec : SPAMVec
SPAM vector to convert
toType : {"full","TP","static","static unitary","clifford",LINDBLAD}
The type of parameterizaton to convert to. "LINDBLAD" is a placeholder
for the various Lindblad parameterization types. See
:method:`Model.set_all_parameterizations` for more details.
basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis for `spamvec`. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
extra : object, optional
Additional information for conversion.
Returns
-------
SPAMVec
The converted SPAM vector, usually a distinct
object from the object passed as input.
"""
if toType == "full":
if isinstance(spamvec, FullSPAMVec):
return spamvec # no conversion necessary
else:
typ = spamvec._prep_or_effect if isinstance(spamvec, SPAMVec) else "prep"
return FullSPAMVec(spamvec.todense(), typ=typ)
elif toType == "TP":
if isinstance(spamvec, TPSPAMVec):
return spamvec # no conversion necessary
else:
return TPSPAMVec(spamvec.todense())
# above will raise ValueError if conversion cannot be done
elif toType == "TrueCPTP": # a non-lindbladian CPTP spamvec that hasn't worked well...
if isinstance(spamvec, CPTPSPAMVec):
return spamvec # no conversion necessary
else:
return CPTPSPAMVec(spamvec, basis)
# above will raise ValueError if conversion cannot be done
elif toType == "static":
if isinstance(spamvec, StaticSPAMVec):
return spamvec # no conversion necessary
else:
typ = spamvec._prep_or_effect if isinstance(spamvec, SPAMVec) else "prep"
return StaticSPAMVec(spamvec, typ=typ)
elif toType == "static unitary":
dmvec = _bt.change_basis(spamvec.todense(), basis, 'std')
purevec = _gt.dmvec_to_state(dmvec)
return StaticSPAMVec(purevec, "statevec", spamvec._prep_or_effect)
elif _gt.is_valid_lindblad_paramtype(toType):
if extra is None:
purevec = spamvec # right now, we don't try to extract a "closest pure vec"
# to spamvec - below will fail if spamvec isn't pure.
else:
purevec = extra # assume extra info is a pure vector
nQubits = _np.log2(spamvec.dim) / 2.0
bQubits = bool(abs(nQubits - round(nQubits)) < 1e-10) # integer # of qubits?
proj_basis = "pp" if (basis == "pp" or bQubits) else basis
typ = spamvec._prep_or_effect if isinstance(spamvec, SPAMVec) else "prep"
return LindbladSPAMVec.from_spamvec_obj(
spamvec, typ, toType, None, proj_basis, basis,
truncate=True, lazy=True)
elif toType == "clifford":
if isinstance(spamvec, StabilizerSPAMVec):
return spamvec # no conversion necessary
purevec = spamvec.flatten() # assume a pure state (otherwise would
# need to change Model dim)
return StabilizerSPAMVec.from_dense_purevec(purevec)
else:
raise ValueError("Invalid toType argument: %s" % toType)
def _convert_to_lindblad_base(vec, typ, new_evotype, mxBasis="pp"):
"""
Attempts to convert `vec` to a static (0 params) SPAMVec with
evoution type `new_evotype`. Used to convert spam vecs to
being LindbladSPAMVec objects.
"""
if vec._evotype == new_evotype and vec.num_params() == 0:
return vec # no conversion necessary
if new_evotype == "densitymx":
return StaticSPAMVec(vec.todense(), "densitymx", typ)
if new_evotype in ("svterm", "cterm"):
if isinstance(vec, ComputationalSPAMVec): # special case when conversion is easy
return ComputationalSPAMVec(vec._zvals, new_evotype, typ)
elif vec._evotype == "densitymx":
# then try to extract a (static) pure state from vec wth
# evotype 'statevec' or 'stabilizer' <=> 'svterm', 'cterm'
if isinstance(vec, DenseSPAMVec):
dmvec = _bt.change_basis(vec, mxBasis, 'std')
purestate = StaticSPAMVec(_gt.dmvec_to_state(dmvec), 'statevec', typ)
elif isinstance(vec, PureStateSPAMVec):
purestate = vec.pure_state_vec # evotype 'statevec'
else:
raise ValueError("Unable to obtain pure state from density matrix type %s!" % type(vec))
if new_evotype == "cterm": # then purestate 'statevec' => 'stabilizer' (if possible)
if typ == "prep":
purestate = StabilizerSPAMVec.from_dense_purevec(purestate.todense())
else: # type == "effect"
purestate = StabilizerEffectVec.from_dense_purevec(purestate.todense())
return PureStateSPAMVec(purestate, new_evotype, mxBasis, typ)
raise ValueError("Could not convert %s (evotype %s) to %s w/0 params!" %
(str(type(vec)), vec._evotype, new_evotype))
def finite_difference_deriv_wrt_params(spamvec, wrtFilter=None, eps=1e-7):
"""
Computes a finite-difference Jacobian for a SPAMVec object.
The returned value is a matrix whose columns are the vectorized
derivatives of the spam vector with respect to a single
parameter, matching the format expected from the spam vectors's
`deriv_wrt_params` method.
Parameters
----------
spamvec : SPAMVec
The spam vector object to compute a Jacobian for.
eps : float, optional
The finite difference step to use.
Returns
-------
numpy.ndarray
An M by N matrix where M is the number of gate elements and
N is the number of gate parameters.
"""
dim = spamvec.get_dimension()
spamvec2 = spamvec.copy()
p = spamvec.to_vector()
fd_deriv = _np.empty((dim, spamvec.num_params()), 'd') # assume real (?)
for i in range(spamvec.num_params()):
p_plus_dp = p.copy()
p_plus_dp[i] += eps
spamvec2.from_vector(p_plus_dp, close=True)
fd_deriv[:, i:i + 1] = (spamvec2 - spamvec) / eps
fd_deriv.shape = [dim, spamvec.num_params()]
if wrtFilter is None:
return fd_deriv
else:
return _np.take(fd_deriv, wrtFilter, axis=1)
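# Note (added): each column i above is the forward difference
#     d(vec)/d(theta_i) ~= (vec(theta + eps*e_i) - vec(theta)) / eps,
# accurate to O(eps); eps=1e-7 roughly balances truncation against round-off
# error for double precision.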
def check_deriv_wrt_params(spamvec, deriv_to_check=None, wrtFilter=None, eps=1e-7):
"""
Checks the `deriv_wrt_params` method of a SPAMVec object.
This routine is meant to be used as an aid in testing and debugging
SPAMVec classes by comparing the finite-difference Jacobian that
*should* be returned by `spamvec.deriv_wrt_params` with the one that
actually is. A ValueError is raised if the two do not match.
Parameters
----------
spamvec : SPAMVec
The gate object to test.
deriv_to_check : numpy.ndarray or None, optional
If not None, the Jacobian to compare against the finite difference
result. If None, `spamvec.deriv_wrt_parms()` is used. Setting this
argument can be useful when the function is called *within* a LinearOperator
class's `deriv_wrt_params()` method itself as a part of testing.
eps : float, optional
The finite difference step to use.
Returns
-------
None
"""
fd_deriv = finite_difference_deriv_wrt_params(spamvec, wrtFilter, eps)
if deriv_to_check is None:
deriv_to_check = spamvec.deriv_wrt_params()
#print("Deriv shapes = %s and %s" % (str(fd_deriv.shape),
# str(deriv_to_check.shape)))
#print("finite difference deriv = \n",fd_deriv)
#print("deriv_wrt_params deriv = \n",deriv_to_check)
#print("deriv_wrt_params - finite diff deriv = \n",
# deriv_to_check - fd_deriv)
for i in range(deriv_to_check.shape[0]):
for j in range(deriv_to_check.shape[1]):
diff = abs(deriv_to_check[i, j] - fd_deriv[i, j])
if diff > 5 * eps:
print("deriv_chk_mismatch: (%d,%d): %g (comp) - %g (fd) = %g" %
(i, j, deriv_to_check[i, j], fd_deriv[i, j], diff))
if _np.linalg.norm(fd_deriv - deriv_to_check) > 100 * eps:
raise ValueError("Failed check of deriv_wrt_params:\n"
" norm diff = %g" %
_np.linalg.norm(fd_deriv - deriv_to_check))
class SPAMVec(_modelmember.ModelMember):
"""
    Encapsulates a parameterization of a state preparation OR POVM effect
vector. This class is the common base class for all specific
parameterizations of a SPAM vector.
"""
def __init__(self, rep, evotype, typ):
""" Initialize a new SPAM Vector """
if isinstance(rep, int): # For operators that have no representation themselves (term ops)
dim = rep # allow passing an integer as `rep`.
rep = None
else:
dim = rep.dim
super(SPAMVec, self).__init__(dim, evotype)
self._rep = rep
self._prep_or_effect = typ
@property
def size(self):
"""
Return the number of independent elements in this gate (when viewed as a dense array)
"""
return self.dim
@property
def outcomes(self):
"""
Return the z-value outcomes corresponding to this effect SPAM vector
in the context of a stabilizer-state simulation.
"""
raise NotImplementedError("'outcomes' property is not implemented for %s objects" % self.__class__.__name__)
def set_value(self, vec):
"""
Attempts to modify SPAMVec parameters so that the specified raw
SPAM vector becomes vec. Will raise ValueError if this operation
is not possible.
Parameters
----------
vec : array_like or SPAMVec
A numpy array representing a SPAM vector, or a SPAMVec object.
Returns
-------
None
"""
raise ValueError("Cannot set the value of a %s directly!" % self.__class__.__name__)
def set_time(self, t):
"""
Sets the current time for a time-dependent operator. For time-independent
operators (the default), this function does absolutely nothing.
Parameters
----------
t : float
The current time.
Returns
-------
None
"""
pass
def todense(self, scratch=None):
"""
Return this SPAM vector as a (dense) numpy array. The memory
        in `scratch` may be used when it is not None.
"""
raise NotImplementedError("todense(...) not implemented for %s objects!" % self.__class__.__name__)
# def torep(self, typ, outrep=None):
# """
# Return a "representation" object for this SPAM vector.
# Such objects are primarily used internally by pyGSTi to compute
# things like probabilities more efficiently.
#
# Parameters
# ----------
# typ : {'prep','effect'}
# The type of representation (for cases when the vector type is
# not already defined).
#
# outrep : StateRep
# If not None, an existing state representation appropriate to this
# SPAM vector that may be used instead of allocating a new one.
#
# Returns
# -------
# StateRep
# """
# if typ == "prep":
# if self._evotype == "statevec":
# return replib.SVStateRep(self.todense())
# elif self._evotype == "densitymx":
# return replib.DMStateRep(self.todense())
# raise NotImplementedError("torep(%s) not implemented for %s objects!" %
# (self._evotype, self.__class__.__name__))
# elif typ == "effect":
# if self._evotype == "statevec":
# return replib.SVEffectRep_Dense(self.todense())
# elif self._evotype == "densitymx":
# return replib.DMEffectRep_Dense(self.todense())
# raise NotImplementedError("torep(%s) not implemented for %s objects!" %
# (self._evotype, self.__class__.__name__))
# else:
# raise ValueError("Invalid `typ` argument for torep(): %s" % typ)
def get_taylor_order_terms(self, order, max_poly_vars=100, return_poly_coeffs=False):
"""
Get the `order`-th order Taylor-expansion terms of this SPAM vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by or POVM effect preceded by actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
SPAMVec's parameters, where the polynomial's variable indices index the
*global* parameters of the SPAMVec's parent (usually a :class:`Model`)
, not the SPAMVec's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
raise NotImplementedError("get_taylor_order_terms(...) not implemented for %s objects!" %
self.__class__.__name__)
def get_highmagnitude_terms(self, min_term_mag, force_firstorder=True, max_taylor_order=3, max_poly_vars=100):
"""
Get the terms (from a Taylor expansion of this SPAM vector) that have
magnitude above `min_term_mag` (the magnitude of a term is taken to
be the absolute value of its coefficient), considering only those
terms up to some maximum Taylor expansion order, `max_taylor_order`.
Note that this function also *sets* the magnitudes of the returned
terms (by calling `term.set_magnitude(...)`) based on the current
values of this SPAM vector's parameters. This is an essential step
to using these terms in pruned-path-integral calculations later on.
Parameters
----------
min_term_mag : float
the threshold for term magnitudes: only terms with magnitudes above
this value are returned.
force_firstorder : bool, optional
if True, then always return all the first-order Taylor-series terms,
even if they have magnitudes smaller than `min_term_mag`. This
behavior is needed for using GST with pruned-term calculations, as
we may begin with a guess model that has no error (all terms have
zero magnitude!) and still need to compute a meaningful jacobian at
this point.
max_taylor_order : int, optional
the maximum Taylor-order to consider when checking whether term-
magnitudes exceed `min_term_mag`.
Returns
-------
highmag_terms : list
A list of the high-magnitude terms that were found. These
terms are *sorted* in descending order by term-magnitude.
first_order_indices : list
A list of the indices into `highmag_terms` that mark which
of these terms are first-order Taylor terms (useful when
we're forcing these terms to always be present).
"""
#NOTE: SAME as for LinearOperator class -- TODO consolidate in FUTURE
#print("DB: SPAM get_high_magnitude_terms")
v = self.to_vector()
taylor_order = 0
terms = []; last_len = -1; first_order_magmax = 1.0
while len(terms) > last_len: # while we keep adding something
if taylor_order > 1 and first_order_magmax**taylor_order < min_term_mag:
break # there's no way any terms at this order reach min_term_mag - exit now!
MAX_CACHED_TERM_ORDER = 1
if taylor_order <= MAX_CACHED_TERM_ORDER:
#print("order ",taylor_order," : ",len(terms), "terms")
terms_at_order, cpolys = self.get_taylor_order_terms(taylor_order, max_poly_vars, True)
coeffs = _bulk_eval_compact_polys_complex(
cpolys[0], cpolys[1], v, (len(terms_at_order),)) # an array of coeffs
mags = _np.abs(coeffs)
last_len = len(terms)
#OLD: terms_at_order = [ t.copy_with_magnitude(abs(coeff)) for coeff, t in zip(coeffs, terms_at_order) ]
if taylor_order == 1:
#OLD: first_order_magmax = max([t.magnitude for t in terms_at_order])
first_order_magmax = max(mags)
if force_firstorder:
terms.extend([(taylor_order, t.copy_with_magnitude(mag))
for coeff, mag, t in zip(coeffs, mags, terms_at_order)])
else:
for mag, t in zip(mags, terms_at_order):
if mag >= min_term_mag:
terms.append((taylor_order, t.copy_with_magnitude(mag)))
else:
for mag, t in zip(mags, terms_at_order):
if mag >= min_term_mag:
terms.append((taylor_order, t.copy_with_magnitude(mag)))
else:
terms.extend([(taylor_order, t) for t in
self.get_taylor_order_terms_above_mag(taylor_order,
max_poly_vars, min_term_mag)])
taylor_order += 1
if taylor_order > max_taylor_order: break
#Sort terms based on magnitude
sorted_terms = sorted(terms, key=lambda t: t[1].magnitude, reverse=True)
first_order_indices = [i for i, t in enumerate(sorted_terms) if t[0] == 1]
return [t[1] for t in sorted_terms], first_order_indices
def get_taylor_order_terms_above_mag(self, order, max_poly_vars, min_term_mag):
""" TODO: docstring """
v = self.to_vector()
terms_at_order, cpolys = self.get_taylor_order_terms(order, max_poly_vars, True)
coeffs = _bulk_eval_compact_polys_complex(
cpolys[0], cpolys[1], v, (len(terms_at_order),)) # an array of coeffs
terms_at_order = [t.copy_with_magnitude(abs(coeff)) for coeff, t in zip(coeffs, terms_at_order)]
return [t for t in terms_at_order if t.magnitude >= min_term_mag]
def frobeniusdist2(self, otherSpamVec, typ, transform=None,
inv_transform=None):
"""
Return the squared frobenius difference between this spam vector and
`otherSpamVec`, optionally transforming this vector first using
`transform` and `inv_transform` (depending on the value of `typ`).
Parameters
----------
otherSpamVec : SPAMVec
The other spam vector
typ : { 'prep', 'effect' }
Which type of SPAM vector is being transformed.
transform, inv_transform : numpy.ndarray
The transformation (if not None) to be performed.
Returns
-------
float
"""
vec = self.todense()
if typ == 'prep':
if inv_transform is None:
return _gt.frobeniusdist2(vec, otherSpamVec.todense())
else:
return _gt.frobeniusdist2(_np.dot(inv_transform, vec),
otherSpamVec.todense())
elif typ == "effect":
if transform is None:
return _gt.frobeniusdist2(vec, otherSpamVec.todense())
else:
return _gt.frobeniusdist2(_np.dot(_np.transpose(transform),
vec), otherSpamVec.todense())
else: raise ValueError("Invalid 'typ' argument: %s" % typ)
def residuals(self, otherSpamVec, typ, transform=None, inv_transform=None):
"""
Return a vector of residuals between this spam vector and
`otherSpamVec`, optionally transforming this vector first using
`transform` and `inv_transform` (depending on the value of `typ`).
Parameters
----------
otherSpamVec : SPAMVec
The other spam vector
typ : { 'prep', 'effect' }
Which type of SPAM vector is being transformed.
transform, inv_transform : numpy.ndarray
The transformation (if not None) to be performed.
Returns
-------
float
"""
vec = self.todense()
if typ == 'prep':
if inv_transform is None:
return _gt.residuals(vec, otherSpamVec.todense())
else:
return _gt.residuals(_np.dot(inv_transform, vec),
otherSpamVec.todense())
elif typ == "effect":
if transform is None:
return _gt.residuals(vec, otherSpamVec.todense())
else:
                return _gt.residuals(_np.dot(_np.transpose(transform),
                                             vec), otherSpamVec.todense())
        else: raise ValueError("Invalid 'typ' argument: %s" % typ)
def transform(self, S, typ):
"""
Update SPAM (column) vector V as inv(S) * V or S^T * V for preparation
or effect SPAM vectors, respectively.
Note that this is equivalent to state preparation vectors getting
mapped: `rho -> inv(S) * rho` and the *transpose* of effect vectors
being mapped as `E^T -> E^T * S`.
Generally, the transform function updates the *parameters* of
the SPAM vector such that the resulting vector is altered as
described above. If such an update cannot be done (because
the gate parameters do not allow for it), ValueError is raised.
Parameters
----------
S : GaugeGroupElement
A gauge group element which specifies the "S" matrix
(and it's inverse) used in the above similarity transform.
typ : { 'prep', 'effect' }
Which type of SPAM vector is being transformed (see above).
"""
if typ == 'prep':
Si = S.get_transform_matrix_inverse()
self.set_value(_np.dot(Si, self.todense()))
elif typ == 'effect':
Smx = S.get_transform_matrix()
self.set_value(_np.dot(_np.transpose(Smx), self.todense()))
#Evec^T --> ( Evec^T * S )^T
else:
raise ValueError("Invalid typ argument: %s" % typ)
def depolarize(self, amount):
"""
Depolarize this SPAM vector by the given `amount`.
Generally, the depolarize function updates the *parameters* of
the SPAMVec such that the resulting vector is depolarized. If
such an update cannot be done (because the gate parameters do not
allow for it), ValueError is raised.
Parameters
----------
amount : float or tuple
The amount to depolarize by. If a tuple, it must have length
equal to one less than the dimension of the SPAM vector. All but the
first element of the spam vector (often corresponding to the
identity element) are multiplied by `amount` (if a float) or
the corresponding `amount[i]` (if a tuple).
Returns
-------
None
"""
if isinstance(amount, float) or _compat.isint(amount):
D = _np.diag([1] + [1 - amount] * (self.dim - 1))
else:
assert(len(amount) == self.dim - 1)
D = _np.diag([1] + list(1.0 - _np.array(amount, 'd')))
self.set_value(_np.dot(D, self.todense()))
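# Example (minimal sketch): for dim == 4 and amount == 0.1, the matrix D
# built above keeps the identity component and shrinks the rest by 10%:
#
#   >>> import numpy as np
#   >>> np.diag([1] + [1 - 0.1] * 3)
#   ... # diag(1.0, 0.9, 0.9, 0.9)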
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return 0 # no parameters
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return _np.array([], 'd') # no parameters
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the SPAM vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of SPAM vector parameters. Length
must == num_params()
Returns
-------
None
"""
assert(len(v) == 0) # should be no parameters, and nothing to do
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
An empty 2D array in the StaticSPAMVec case (num_params == 0).
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
dtype = complex if self._evotype == 'statevec' else 'd'
derivMx = _np.zeros((self.dim, 0), dtype)
if wrtFilter is None:
return derivMx
else:
return _np.take(derivMx, wrtFilter, axis=1)
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
#Default: assume Hessian can be nonzero if there are any parameters
return self.num_params() > 0
def hessian_wrt_params(self, wrtFilter1=None, wrtFilter2=None):
"""
Construct the Hessian of this SPAM vector with respect to its parameters.
This function returns a tensor whose first axis corresponds to the
flattened operation matrix and whose 2nd and 3rd axes correspond to the
parameters that are differentiated with respect to.
Parameters
----------
wrtFilter1, wrtFilter2 : list
Lists of indices of the parameters to take first and second
derivatives with respect to. If None, then derivatives are
taken with respect to all of the vector's parameters.
Returns
-------
numpy array
Hessian with shape (dimension, num_params1, num_params2)
"""
if not self.has_nonzero_hessian():
return _np.zeros((self.size, self.num_params(), self.num_params()), 'd')
# FUTURE: create a finite differencing hessian method?
raise NotImplementedError("hessian_wrt_params(...) is not implemented for %s objects" % self.__class__.__name__)
#Note: no __str__ fn
@staticmethod
def convert_to_vector(V):
"""
Static method that converts a vector-like object to a 2D numpy
dim x 1 column array.
Parameters
----------
V : array_like
Returns
-------
numpy array
"""
if isinstance(V, SPAMVec):
vector = V.todense().copy()
vector.shape = (vector.size, 1)
elif isinstance(V, _np.ndarray):
vector = V.copy()
if len(vector.shape) == 1: # convert (N,) shape vecs to (N,1)
vector.shape = (vector.size, 1)
else:
try:
len(V)
except TypeError: # V has no length => not array-like
raise ValueError("%s doesn't look like an array/list" % V)
try:
d2s = [len(row) for row in V]
except TypeError: # thrown if len(row) fails because no 2nd dim
d2s = None
if d2s is not None:
if any([len(row) != 1 for row in V]):
raise ValueError("%s is 2-dimensional but 2nd dim != 1" % V)
typ = 'd' if _np.all(_np.isreal(V)) else 'complex'
try:
vector = _np.array(V, typ) # vec is already a 2-D column vector
except TypeError:
raise ValueError("%s doesn't look like an array/list" % V)
else:
typ = 'd' if _np.all(_np.isreal(V)) else 'complex'
vector = _np.array(V, typ)[:, None] # make into a 2-D column vec
assert(len(vector.shape) == 2 and vector.shape[1] == 1)
return vector.flatten() # HACK for convention change -> (N,) instead of (N,1)
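# Example (minimal sketch): lists, (N,)-shaped, and (N,1)-shaped inputs all
# come back as a flat (N,) array:
#
#   >>> SPAMVec.convert_to_vector([[0.7071], [0.], [0.], [0.7071]])
#   ... # array([0.7071, 0., 0., 0.7071])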
class DenseSPAMVec(SPAMVec):
"""
Encapsulates a parameterization of a state preparation OR POVM effect
vector. This class is the common base class for all specific
parameterizations of a SPAM vector.
"""
def __init__(self, vec, evotype, prep_or_effect):
""" Initialize a new SPAM Vector """
dtype = complex if evotype == "statevec" else 'd'
vec = _np.asarray(vec, dtype=dtype)
vec.shape = (vec.size,) # just store the flattened 1D array
vec = _np.require(vec, requirements=['OWNDATA', 'C_CONTIGUOUS'])
if prep_or_effect == "prep":
if evotype == "statevec":
rep = replib.SVStateRep(vec)
elif evotype == "densitymx":
rep = replib.DMStateRep(vec)
else:
raise ValueError("Invalid evotype for DenseSPAMVec: %s" % evotype)
elif prep_or_effect == "effect":
if evotype == "statevec":
rep = replib.SVEffectRep_Dense(vec)
elif evotype == "densitymx":
rep = replib.DMEffectRep_Dense(vec)
else:
raise ValueError("Invalid evotype for DenseSPAMVec: %s" % evotype)
else:
raise ValueError("Invalid `prep_or_effect` argument: %s" % prep_or_effect)
super(DenseSPAMVec, self).__init__(rep, evotype, prep_or_effect)
assert(self.base1D.flags['C_CONTIGUOUS'] and self.base1D.flags['OWNDATA'])
def todense(self, scratch=None):
"""
Return this SPAM vector as a (dense) numpy array. The memory
in `scratch` may be used when it is not None.
"""
#don't use scratch since we already have memory allocated
return self.base1D # *must* be a numpy array for Cython arg conversion
@property
def base1D(self):
return self._rep.base
@property
def base(self):
bv = self.base1D.view()
bv.shape = (bv.size, 1) # 'base' is by convention a (N,1)-shaped array
return bv
def __copy__(self):
# We need to implement __copy__ because we defer all non-existing
# attributes to self.base (a numpy array) which *has* a __copy__
# implementation that we don't want to use, as it results in just a
# copy of the numpy array.
cls = self.__class__
cpy = cls.__new__(cls)
cpy.__dict__.update(self.__dict__)
return cpy
def __deepcopy__(self, memo):
# We need to implement __deepcopy__ because we defer all non-existing
# attributes to self.base (a numpy array) which *has* a __deepcopy__
# implementation that we don't want to use, as it results in just a
# copy of the numpy array.
cls = self.__class__
cpy = cls.__new__(cls)
memo[id(self)] = cpy
for k, v in self.__dict__.items():
setattr(cpy, k, _copy.deepcopy(v, memo))
return cpy
#Access to underlying array
def __getitem__(self, key):
self.dirty = True
return self.base.__getitem__(key)
def __getslice__(self, i, j):
self.dirty = True
return self.__getitem__(slice(i, j)) # Called for A[:]
def __setitem__(self, key, val):
self.dirty = True
return self.base.__setitem__(key, val)
def __getattr__(self, attr):
#use __dict__ so no chance for recursive __getattr__
if '_rep' in self.__dict__: # sometimes in loading __getattr__ gets called before the instance is loaded
ret = getattr(self.base, attr)
else:
raise AttributeError("No attribute:", attr)
self.dirty = True
return ret
#Mimic array
def __pos__(self): return self.base
def __neg__(self): return -self.base
def __abs__(self): return abs(self.base)
def __add__(self, x): return self.base + x
def __radd__(self, x): return x + self.base
def __sub__(self, x): return self.base - x
def __rsub__(self, x): return x - self.base
def __mul__(self, x): return self.base * x
def __rmul__(self, x): return x * self.base
def __truediv__(self, x): return self.base / x
def __rtruediv__(self, x): return x / self.base
def __floordiv__(self, x): return self.base // x
def __rfloordiv__(self, x): return x // self.base
def __pow__(self, x): return self.base ** x
def __eq__(self, x): return self.base == x
def __len__(self): return len(self.base)
def __int__(self): return int(self.base)
def __long__(self): return int(self.base)
def __float__(self): return float(self.base)
def __complex__(self): return complex(self.base)
def __str__(self):
s = "%s with dimension %d\n" % (self.__class__.__name__, self.dim)
s += _mt.mx_to_string(self.todense(), width=4, prec=2)
return s
class StaticSPAMVec(DenseSPAMVec):
"""
Encapsulates a SPAM vector that is completely fixed, or "static", meaning
that is contains no parameters.
"""
def __init__(self, vec, evotype="auto", typ="prep"):
"""
Initialize a StaticSPAMVec object.
Parameters
----------
vec : array_like or SPAMVec
a 1D numpy array representing the SPAM operation. The
shape of this array sets the dimension of the SPAM op.
evotype : {"densitymx", "statevec"}
the evolution type being used.
typ : {"prep", "effect"}
whether this is a state preparation or an effect (measurement)
SPAM vector.
"""
vec = SPAMVec.convert_to_vector(vec)
if evotype == "auto":
evotype = "statevec" if _np.iscomplexobj(vec) else "densitymx"
elif evotype == "statevec":
vec = _np.asarray(vec, complex) # ensure all statevec vecs are complex (densitymx could be either?)
assert(evotype in ("statevec", "densitymx")), \
"Invalid evolution type '%s' for %s" % (evotype, self.__class__.__name__)
DenseSPAMVec.__init__(self, vec, evotype, typ)
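# Example (minimal sketch): with evotype="auto" the evolution type is
# inferred from the dtype of the given vector -- real values give a
# density-matrix vector, genuinely complex values a state vector:
#
#   >>> StaticSPAMVec([0.7071, 0., 0., 0.7071])._evotype
#   'densitymx'
#   >>> StaticSPAMVec([0., 1j])._evotype
#   'statevec'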
class FullSPAMVec(DenseSPAMVec):
"""
Encapsulates a SPAM vector that is fully parameterized, that is,
each element of the SPAM vector is an independent parameter.
"""
def __init__(self, vec, evotype="auto", typ="prep"):
"""
Initialize a FullSPAMVec object.
Parameters
----------
vec : array_like or SPAMVec
a 1D numpy array representing the SPAM operation. The
shape of this array sets the dimension of the SPAM op.
evotype : {"densitymx", "statevec"}
the evolution type being used.
typ : {"prep", "effect"}
whether this is a state preparation or an effect (measurement)
SPAM vector.
"""
vec = SPAMVec.convert_to_vector(vec)
if evotype == "auto":
evotype = "statevec" if _np.iscomplexobj(vec) else "densitymx"
assert(evotype in ("statevec", "densitymx")), \
"Invalid evolution type '%s' for %s" % (evotype, self.__class__.__name__)
DenseSPAMVec.__init__(self, vec, evotype, typ)
def set_value(self, vec):
"""
Attempts to modify SPAMVec parameters so that the specified raw
SPAM vector becomes vec. Will raise ValueError if this operation
is not possible.
Parameters
----------
vec : array_like or SPAMVec
A numpy array representing a SPAM vector, or a SPAMVec object.
Returns
-------
None
"""
vec = SPAMVec.convert_to_vector(vec)
if(vec.size != self.dim):
raise ValueError("Argument must be length %d" % self.dim)
self.base1D[:] = vec
self.dirty = True
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return 2 * self.size if self._evotype == "statevec" else self.size
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
if self._evotype == "statevec":
return _np.concatenate((self.base1D.real, self.base1D.imag), axis=0)
else:
return self.base1D
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the SPAM vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of SPAM vector parameters. Length
must == num_params()
Returns
-------
None
"""
if self._evotype == "statevec":
self.base1D[:] = v[0:self.dim] + 1j * v[self.dim:]
else:
self.base1D[:] = v
if not nodirty: self.dirty = True
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
if self._evotype == "statevec":
derivMx = _np.concatenate((_np.identity(self.dim, complex),
1j * _np.identity(self.dim, complex)), axis=1)
else:
derivMx = _np.identity(self.dim, 'd')
if wrtFilter is None:
return derivMx
else:
return _np.take(derivMx, wrtFilter, axis=1)
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
return False
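# Example (minimal sketch): a "statevec" FullSPAMVec stores the real parts
# followed by the imaginary parts as its parameters, so a length-2 complex
# vector has 4 parameters:
#
#   >>> v = FullSPAMVec([0., 1j], evotype="statevec")
#   >>> v.num_params()
#   4
#   >>> v.to_vector()  # [Re(v0), Re(v1), Im(v0), Im(v1)]
#   ... # array([0., 0., 0., 1.])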
class TPSPAMVec(DenseSPAMVec):
"""
Encapsulates a SPAM vector that is fully parameterized except for the first
element, which is frozen to be 1/(dim**0.25), i.e. 1/sqrt(d) for a vector
of length dim == d**2. This is so that, when the SPAM vector is
interpreted in the Pauli or Gell-Mann basis, the represented
density matrix has trace == 1. This restriction is frequently used in
conjunction with trace-preserving (TP) gates.
"""
#Note: here we assume that the first basis element is (1/sqrt(x) * I),
# where I the d-dimensional identity (where len(vector) == d**2). So
# if Tr(basisEl*basisEl) == Tr(1/x*I) == d/x must == 1, then we must
# have x == d. Thus, we multiply this first basis element by
# alpha = 1/sqrt(d) to obtain a trace-1 matrix, i.e., finding alpha
# s.t. Tr(alpha*[1/sqrt(d)*I]) == 1 => alpha*d/sqrt(d) == 1 =>
# alpha = 1/sqrt(d) = 1/(len(vec)**0.25).
def __init__(self, vec):
"""
Initialize a TPSPAMVec object.
Parameters
----------
vec : array_like or SPAMVec
a 1D numpy array representing the SPAM operation. The
shape of this array sets the dimension of the SPAM op.
"""
vector = SPAMVec.convert_to_vector(vec)
firstEl = len(vector)**-0.25
if not _np.isclose(vector[0], firstEl):
raise ValueError("Cannot create TPSPAMVec: "
"first element must equal %g!" % firstEl)
DenseSPAMVec.__init__(self, vec, "densitymx", "prep")
assert(isinstance(self.base, _ProtectedArray))
@property
def base(self):
bv = self.base1D.view()
bv.shape = (bv.size, 1)
return _ProtectedArray(bv, indicesToProtect=(0, 0))
def set_value(self, vec):
"""
Attempts to modify SPAMVec parameters so that the specified raw
SPAM vector becomes vec. Will raise ValueError if this operation
is not possible.
Parameters
----------
vec : array_like or SPAMVec
A numpy array representing a SPAM vector, or a SPAMVec object.
Returns
-------
None
"""
vec = SPAMVec.convert_to_vector(vec)
firstEl = (self.dim)**-0.25
if(vec.size != self.dim):
raise ValueError("Argument must be length %d" % self.dim)
if not _np.isclose(vec[0], firstEl):
raise ValueError("Cannot create TPSPAMVec: "
"first element must equal %g!" % firstEl)
self.base1D[1:] = vec[1:]
self.dirty = True
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return self.dim - 1
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return self.base1D[1:] # .real in case of complex matrices?
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the SPAM vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of SPAM vector parameters. Length
must == num_params()
Returns
-------
None
"""
assert(_np.isclose(self.base1D[0], (self.dim)**-0.25))
self.base1D[1:] = v
if not nodirty: self.dirty = True
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
derivMx = _np.identity(self.dim, 'd') # TP vecs assumed real
derivMx = derivMx[:, 1:] # remove first col ( <=> first-el parameters )
if wrtFilter is None:
return derivMx
else:
return _np.take(derivMx, wrtFilter, axis=1)
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
return False
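# Example (worked): a single qubit in the Pauli-product basis has a length-4
# vector, so the frozen first element is 4**-0.25 == 1/sqrt(2) ~= 0.70711 and
# only the remaining 3 elements are free parameters:
#
#   >>> import numpy as np
#   >>> v = TPSPAMVec([1 / np.sqrt(2), 0., 0., 1 / np.sqrt(2)])
#   >>> v.num_params()
#   3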
class ComplementSPAMVec(DenseSPAMVec):
"""
Encapsulates a SPAM vector that is parameterized by
`I - sum(other_spam_vecs)` where `I` is a (static) identity element
and `other_param_vecs` is a list of other spam vectors in the same parent
:class:`POVM`. This only *partially* implements the SPAMVec interface
(some methods such as `to_vector` and `from_vector` will thunk down to base
class versions which raise `NotImplementedError`), as instances are meant to
be contained within a :class:`POVM` which takes care of vectorization.
"""
def __init__(self, identity, other_spamvecs):
"""
Initialize a ComplementSPAMVec object.
Parameters
----------
identity : array_like or SPAMVec
a 1D numpy array representing the static identity operation from
which the sum of the other vectors is subtracted.
other_spamvecs : list of SPAMVecs
A list of the "other" parameterized SPAM vectors which are
subtracted from `identity` to compute the final value of this
"complement" SPAM vector.
"""
self.identity = FullSPAMVec(
SPAMVec.convert_to_vector(identity)) # so easy to transform
# or depolarize by parent POVM
self.other_vecs = other_spamvecs
#Note: we assume that our parent will do the following:
# 1) set our gpindices to indicate how many parameters we have
# 2) set the gpindices of the elements of other_spamvecs so
# that they index into our local parameter vector.
DenseSPAMVec.__init__(self, self.identity, "densitymx", "effect") # dummy
self._construct_vector() # resets self.base
def _construct_vector(self):
self.base1D.flags.writeable = True
self.base1D[:] = self.identity.base1D - sum([vec.base1D for vec in self.other_vecs])
self.base1D.flags.writeable = False
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return len(self.gpindices_as_array())
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
raise ValueError(("ComplementSPAMVec.to_vector() should never be called"
" - use TPPOVM.to_vector() instead"))
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize this SPAM vector using a vector of its parameters.
Parameters
----------
v : numpy array
The 1D vector of parameters.
Returns
-------
None
"""
#Rely on prior .from_vector initialization of self.other_vecs, so
# we just construct our vector based on them.
#Note: this is needed for finite-differencing in map-based calculator
self._construct_vector()
if not nodirty: self.dirty = True
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
if len(self.other_vecs) == 0: return _np.zeros((self.dim, 0), 'd') # Complement vecs assumed real
Np = len(self.gpindices_as_array())
neg_deriv = _np.zeros((self.dim, Np), 'd')
for ovec in self.other_vecs:
local_inds = _modelmember._decompose_gpindices(
self.gpindices, ovec.gpindices)
#Note: other_vecs are not copies but other *sibling* effect vecs
# so their gpindices index the same space as this complement vec's
# does - so we need to "_decompose_gpindices"
neg_deriv[:, local_inds] += ovec.deriv_wrt_params()
derivMx = -neg_deriv
if wrtFilter is None:
return derivMx
else:
return _np.take(derivMx, wrtFilter, axis=1)
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
return False
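# Example (minimal sketch; normally a ComplementSPAMVec is created *by* a
# TPPOVM, which also manages its parameter indices): for a 2-outcome POVM,
# the complement effect automatically tracks identity - E0:
#
#   >>> import numpy as np
#   >>> identity = [np.sqrt(2), 0., 0., 0.]  # 1-qubit identity, pp basis
#   >>> E0 = FullSPAMVec([1 / np.sqrt(2), 0., 0., 1 / np.sqrt(2)], typ="effect")
#   >>> Ec = ComplementSPAMVec(identity, [E0])  # dense value == identity - E0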
class CPTPSPAMVec(DenseSPAMVec):
"""
Encapsulates a SPAM vector that is parameterized through the Cholesky
decomposition of its standard-basis representation as a density matrix
(not a Liouville vector). The resulting SPAM vector thus represents a
positive density matrix, and additional constraints on the parameters
also guarantee that the trace == 1. This SPAM vector is meant for
use with CPTP processes, hence the name.
"""
def __init__(self, vec, basis, truncate=False):
"""
Initialize a CPTPSPAMVec object.
Parameters
----------
vec : array_like or SPAMVec
a 1D numpy array representing the SPAM operation. The
shape of this array sets the dimension of the SPAM op.
basis : {"std", "gm", "pp", "qt"} or Basis
The basis `vec` is in. Needed because this parameterization
requires we construct the density matrix corresponding to
the Liouville vector `vec`.
truncate : bool, optional
Whether or not a `vec` that is not positive or not trace-1 should
be truncated to force a successful construction.
"""
vector = SPAMVec.convert_to_vector(vec)
basis = _Basis.cast(basis, len(vector))
self.basis = basis
self.basis_mxs = basis.elements # shape (len(vec), dmDim, dmDim)
self.basis_mxs = _np.rollaxis(self.basis_mxs, 0, 3) # shape (dmDim, dmDim, len(vec))
assert(self.basis_mxs.shape[-1] == len(vector))
# set self.params and self.dmDim
self._set_params_from_vector(vector, truncate)
#scratch space
self.Lmx = _np.zeros((self.dmDim, self.dmDim), 'complex')
DenseSPAMVec.__init__(self, vector, "densitymx", "prep")
def _set_params_from_vector(self, vector, truncate):
density_mx = _np.dot(self.basis_mxs, vector)
density_mx = density_mx.squeeze()
dmDim = density_mx.shape[0]
assert(dmDim == density_mx.shape[1]), "Density matrix must be square!"
trc = _np.trace(density_mx)
assert(truncate or _np.isclose(trc, 1.0)), \
"`vec` must correspond to a trace-1 density matrix (truncate == False)!"
if not _np.isclose(trc, 1.0): # truncate to trace == 1
density_mx -= _np.identity(dmDim, 'd') / dmDim * (trc - 1.0)
#push any slightly negative evals of density_mx positive
# so that the Cholesky decomp will work.
evals, U = _np.linalg.eig(density_mx)
Ui = _np.linalg.inv(U)
assert(truncate or all([ev >= -1e-12 for ev in evals])), \
"`vec` must correspond to a positive density matrix (truncate == False)!"
pos_evals = evals.clip(1e-16, 1e100)
density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
try:
Lmx = _np.linalg.cholesky(density_mx)
except _np.linalg.LinAlgError: # Lmx not positive definite?
pos_evals = evals.clip(1e-12, 1e100) # try again with 1e-12
density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
Lmx = _np.linalg.cholesky(density_mx)
#check TP condition: that diagonal els of Lmx squared add to 1.0
Lmx_norm = _np.trace(_np.dot(Lmx.T.conjugate(), Lmx)) # sum of magnitude^2 of all els
assert(_np.isclose(Lmx_norm, 1.0)), \
"Cholesky decomp didn't preserve trace=1!"
self.dmDim = dmDim
self.params = _np.empty(dmDim**2, 'd')
for i in range(dmDim):
assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
self.params[i * dmDim + i] = Lmx[i, i].real # / paramNorm == 1 as asserted above
for j in range(i):
self.params[i * dmDim + j] = Lmx[i, j].real
self.params[j * dmDim + i] = Lmx[i, j].imag
def _construct_vector(self):
dmDim = self.dmDim
# params is an array of length dmDim^2 that
# encodes a lower-triangular matrix "Lmx" via:
# Lmx[i,i] = params[i*dmDim + i] / paramNorm
# Lmx[i,j] = (params[i*dmDim + j] + 1j*params[j*dmDim+i]) / paramNorm (i > j)
# where paramNorm = sqrt(sum of all params**2) normalizes *all* elements at once
param2Sum = _np.vdot(self.params, self.params) # or "dot" would work, since params are real
paramNorm = _np.sqrt(param2Sum) # also the norm of *all* Lmx els
for i in range(dmDim):
self.Lmx[i, i] = self.params[i * dmDim + i] / paramNorm
for j in range(i):
self.Lmx[i, j] = (self.params[i * dmDim + j] + 1j * self.params[j * dmDim + i]) / paramNorm
Lmx_norm = _np.trace(_np.dot(self.Lmx.T.conjugate(), self.Lmx)) # sum of magnitude^2 of all els
assert(_np.isclose(Lmx_norm, 1.0)), "Violated trace=1 condition!"
#The (complex, Hermitian) density matrix is built by
# assuming Lmx is its Cholesky decomposition, which makes
# the density matrix positive semidefinite by construction.
density_mx = _np.dot(self.Lmx, self.Lmx.T.conjugate())
assert(_np.isclose(_np.trace(density_mx), 1.0)), "density matrix must be trace == 1"
# write density matrix in given basis: = sum_i alpha_i B_i
# ASSUME that basis is orthogonal, i.e. Tr(Bi^dag*Bj) = delta_ij
basis_mxs = _np.rollaxis(self.basis_mxs, 2) # shape (dmDim, dmDim, len(vec))
vec = _np.array([_np.trace(_np.dot(M.T.conjugate(), density_mx)) for M in basis_mxs])
#for now, assume Liouville vector should always be real (TODO: add 'real' flag later?)
assert(_np.linalg.norm(_np.imag(vec)) < IMAG_TOL)
vec = _np.real(vec)
self.base1D.flags.writeable = True
self.base1D[:] = vec[:] # base1D holds the flattened (dim,) spam vector
self.base1D.flags.writeable = False
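# Worked summary of the parameterization above: the params P fill a lower-
# triangular L with L[i,i] = P[i*d+i]/|P| and L[i,j] = (P[i*d+j] +
# 1j*P[j*d+i])/|P| for i > j, where |P| = sqrt(sum of all P**2). Then
# rho = L L^dag is positive semidefinite by construction and
# Tr(rho) = Tr(L^dag L) = |L|_F^2 = 1, since dividing by |P| makes the
# Frobenius norm of L exactly 1 -- the trace-1 condition asserted above.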
def set_value(self, vec):
"""
Attempts to modify SPAMVec parameters so that the specified raw
SPAM vector becomes vec. Will raise ValueError if this operation
is not possible.
Parameters
----------
vec : array_like or SPAMVec
A numpy array representing a SPAM vector, or a SPAMVec object.
Returns
-------
None
"""
try:
self._set_params_from_vector(vec, truncate=False)
self.dirty = True
except AssertionError as e:
raise ValueError("Error initializing the parameters of this "
"CPTPSPAMVec object: " + str(e))
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
assert(self.dmDim**2 == self.dim) # should at least be true without composite bases...
return self.dmDim**2
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return self.params
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the SPAM vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of SPAM vector parameters. Length
must == num_params()
Returns
-------
None
"""
assert(len(v) == self.num_params())
self.params[:] = v[:]
self._construct_vector()
if not nodirty: self.dirty = True
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
dmDim = self.dmDim
nP = len(self.params)
assert(nP == dmDim**2) # number of parameters
# v_i = trace( B_i^dag * Lmx * Lmx^dag )
# d(v_i) = trace( B_i^dag * (dLmx * Lmx^dag + Lmx * (dLmx)^dag) ) #trace = linear so commutes w/deriv
# where dLmx/d[ab] is all zeros except for a single element at (a,b):
# 1 for the real params (a >= b, masked by F1 below) and
# 1j for the imaginary params (a < b, masked by F2 below).
L, Lbar = self.Lmx, self.Lmx.conjugate()
F1 = _np.tril(_np.ones((dmDim, dmDim), 'd'))
F2 = _np.triu(_np.ones((dmDim, dmDim), 'd'), 1) * 1j
conj_basis_mxs = self.basis_mxs.conjugate()
# Derivative of vector wrt params; shape == [vecLen,dmDim,dmDim] *not dealing with TP condition yet*
# (first get derivative assuming last diagonal el of Lmx *is* a parameter, then use chain rule)
dVdp = _np.einsum('aml,mb,ab->lab', conj_basis_mxs, Lbar, F1) # only a >= b nonzero (F1)
dVdp += _np.einsum('mal,mb,ab->lab', conj_basis_mxs, L, F1) # ditto
dVdp += _np.einsum('bml,ma,ab->lab', conj_basis_mxs, Lbar, F2) # only b > a nonzero (F2)
dVdp += _np.einsum('mbl,ma,ab->lab', conj_basis_mxs, L, F2.conjugate()) # ditto
dVdp.shape = [dVdp.shape[0], nP] # jacobian with respect to "p" params,
# which don't include normalization for TP-constraint
#Now get jacobian of actual params wrt the params used above. Denote the actual
# params "P" in variable names, so p_ij = P_ij / sqrt(sum(P_xy**2))
param2Sum = _np.vdot(self.params, self.params)
paramNorm = _np.sqrt(param2Sum) # norm of *all* Lmx els
dpdP = _np.identity(nP, 'd')
# all p_ij params == P_ij / paramNorm = P_ij / sqrt(sum(P_xy**2))
# and so have derivs wrt *all* Pxy elements.
for ij in range(nP):
for kl in range(nP):
if ij == kl:
# dp_ij / dP_ij = 1.0 / (sum(P_xy**2))^(1/2) - 0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_ij
# = 1.0 / (sum(P_xy**2))^(1/2) - P_ij^2 / (sum(P_xy**2))^(3/2)
dpdP[ij, ij] = 1.0 / paramNorm - self.params[ij]**2 / paramNorm**3
else:
# dp_ij / dP_kl = -0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_kl
# = - P_ij * P_kl / (sum(P_xy**2))^(3/2)
dpdP[ij, kl] = - self.params[ij] * self.params[kl] / paramNorm**3
#Apply the chain rule to get dVdP:
dVdP = _np.dot(dVdp, dpdP) # shape (vecLen, nP) - the jacobian!
dVdp = dpdP = None # free memory!
assert(_np.linalg.norm(_np.imag(dVdP)) < IMAG_TOL)
derivMx = _np.real(dVdP)
if wrtFilter is None:
return derivMx
else:
return _np.take(derivMx, wrtFilter, axis=1)
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
return True
def hessian_wrt_params(self, wrtFilter1=None, wrtFilter2=None):
"""
Construct the Hessian of this SPAM vector with respect to its parameters.
This function returns a tensor whose first axis corresponds to the
flattened operation matrix and whose 2nd and 3rd axes correspond to the
parameters that are differentiated with respect to.
Parameters
----------
wrtFilter1, wrtFilter2 : list
Lists of indices of the parameters to take first and second
derivatives with respect to. If None, then derivatives are
taken with respect to all of the vector's parameters.
Returns
-------
numpy array
Hessian with shape (dimension, num_params1, num_params2)
"""
raise NotImplementedError("TODO: add hessian computation for CPTPSPAMVec")
class TensorProdSPAMVec(SPAMVec):
"""
Encapsulates a SPAM vector that is a tensor-product of other SPAM vectors.
"""
def __init__(self, typ, factors, povmEffectLbls=None):
"""
Initialize a TensorProdSPAMVec object.
Parameters
----------
typ : {"prep","effect"}
The type of spam vector - either a product of preparation
vectors ("prep") or of POVM effect vectors ("effect")
factors : list of SPAMVecs or POVMs
if `typ == "prep"`, a list of the component SPAMVecs; if
`typ == "effect"`, a list of "reference" POVMs into which
`povmEffectLbls` indexes.
povmEffectLbls : array-like
Only non-None when `typ == "effect"`. The effect label of each
factor POVM which is tensored together to form this SPAM vector.
"""
assert(len(factors) > 0), "Must have at least one factor!"
self.factors = factors # do *not* copy - needs to reference common objects
self.Np = sum([fct.num_params() for fct in factors])
if typ == "effect":
self.effectLbls = _np.array(povmEffectLbls)
elif typ == "prep":
assert(povmEffectLbls is None), '`povmEffectLbls` must be None when `typ != "effect"`'
self.effectLbls = None
else: raise ValueError("Invalid `typ` argument: %s" % typ)
dim = _np.product([fct.dim for fct in factors])
evotype = self.factors[0]._evotype
#Arrays for speeding up kron product in effect reps
if evotype in ("statevec", "densitymx") and typ == "effect": # types that require fast kronecker prods
max_factor_dim = max(fct.dim for fct in factors)
self._fast_kron_array = _np.ascontiguousarray(
_np.empty((len(factors), max_factor_dim), complex if evotype == "statevec" else 'd'))
self._fast_kron_factordims = _np.ascontiguousarray(
_np.array([fct.dim for fct in factors], _np.int64))
else: # "stabilizer", "svterm", "cterm"
self._fast_kron_array = None
self._fast_kron_factordims = None
#Create representation
if evotype == "statevec":
if typ == "prep": # prep-type vectors can be represented as dense effects too
rep = replib.SVStateRep(_np.ascontiguousarray(_np.zeros(dim, complex)))
#sometimes: return replib.SVEffectRep_Dense(self.todense()) ???
else: # "effect"
rep = replib.SVEffectRep_TensorProd(self._fast_kron_array, self._fast_kron_factordims,
len(self.factors), self._fast_kron_array.shape[1], dim)
elif evotype == "densitymx":
if typ == "prep":
vec = _np.require(_np.zeros(dim, 'd'), requirements=['OWNDATA', 'C_CONTIGUOUS'])
rep = replib.DMStateRep(vec)
#sometimes: return replib.DMEffectRep_Dense(self.todense()) ???
else: # "effect"
rep = replib.DMEffectRep_TensorProd(self._fast_kron_array, self._fast_kron_factordims,
len(self.factors), self._fast_kron_array.shape[1], dim)
elif evotype == "stabilizer":
if typ == "prep":
#Rep is stabilizer-rep tuple, just like StabilizerSPAMVec
sframe_factors = [f.todense() for f in self.factors] # StabilizerFrame for each factor
rep = _stabilizer.sframe_kronecker(sframe_factors).torep()
#Sometimes ???
# prep-type vectors can be represented as dense effects too; this just means that self.factors
# => self.factors should all be StabilizerSPAMVec objs
#else: # self._prep_or_effect == "effect", so each factor is a StabilizerEffectVec
# outcomes = _np.array(list(_itertools.chain(*[f.outcomes for f in self.factors])), _np.int64)
# return replib.SBEffectRep(outcomes)
else: # self._prep_or_effect == "effect", so each factor is a StabilizerZPOVM
# like above, but get a StabilizerEffectVec from each StabilizerZPOVM factor
factorPOVMs = self.factors
factorVecs = [factorPOVMs[i][self.effectLbls[i]] for i in range(len(factorPOVMs))] # include *all* factors
outcomes = _np.array(list(_itertools.chain(*[f.outcomes for f in factorVecs])), _np.int64)
rep = replib.SBEffectRep(outcomes)
#OLD - now can remove outcomes prop?
#raise ValueError("Cannot convert Stabilizer tensor product effect to a representation!")
# should be using effect.outcomes property...
else: # self._evotype in ("svterm","cterm")
rep = dim # no reps for term-based evotypes
SPAMVec.__init__(self, rep, evotype, typ)
self._update_rep() # initializes rep data
#sets gpindices, so do before stuff below
if typ == "effect":
#Set our parent and gpindices based on those of factor-POVMs, which
# should all be owned by a TensorProdPOVM object.
# (for now say we depend on *all* the POVMs parameters (even though
# we really only depend on one element of each POVM, which may allow
# using just a subset of each factor POVMs indices - but this is tricky).
self.set_gpindices(_slct.list_to_slice(
_np.concatenate([fct.gpindices_as_array()
for fct in factors], axis=0), True, False),
factors[0].parent) # use parent of first factor
# (they should all be the same)
else:
# don't init our own gpindices (prep case), since our parent
# is likely to be a Model and it will init them correctly.
#But do set the indices of self.factors, since they're now
# considered "owned" by this product-prep-vec (different from
# the "effect" case when the factors are shared).
off = 0
for fct in factors:
assert(isinstance(fct, SPAMVec)), "Factors must be SPAMVec objects!"
N = fct.num_params()
fct.set_gpindices(slice(off, off + N), self); off += N
assert(off == self.Np)
def _fill_fast_kron(self):
""" Fills in self._fast_kron_array based on current self.factors """
if self._prep_or_effect == "prep":
for i, factor_dim in enumerate(self._fast_kron_factordims):
self._fast_kron_array[i][0:factor_dim] = self.factors[i].todense()
else:
factorPOVMs = self.factors
for i, (factor_dim, Elbl) in enumerate(zip(self._fast_kron_factordims, self.effectLbls)):
self._fast_kron_array[i][0:factor_dim] = factorPOVMs[i][Elbl].todense()
def _update_rep(self):
if self._evotype in ("statevec", "densitymx"):
if self._prep_or_effect == "prep":
self._rep.base[:] = self.todense()
else:
self._fill_fast_kron() # updates effect reps
elif self._evotype == "stabilizer":
if self._prep_or_effect == "prep":
#we need to update self._rep, which is a SBStateRep object. For now, we
# kinda punt and just create a new rep and copy its data over to the existing
# rep instead of having some kind of update method within SBStateRep...
# (TODO FUTURE - at least a .copy_from method?)
sframe_factors = [f.todense() for f in self.factors] # StabilizerFrame for each factor
new_rep = _stabilizer.sframe_kronecker(sframe_factors).torep()
self._rep.smatrix[:, :] = new_rep.smatrix[:, :]
self._rep.pvectors[:, :] = new_rep.pvectors[:, :]
self._rep.amps[:, :] = new_rep.amps[:, :]
else:
pass # I think the below (e.g. 'outcomes') is not altered by any parameters
#factorPOVMs = self.factors
#factorVecs = [factorPOVMs[i][self.effectLbls[i]] for i in range(1, len(factorPOVMs))]
#outcomes = _np.array(list(_itertools.chain(*[f.outcomes for f in factorVecs])), _np.int64)
#rep = replib.SBEffectRep(outcomes)
def todense(self):
"""
Return this SPAM vector as a (dense) numpy array.
"""
if self._evotype in ("statevec", "densitymx"):
if len(self.factors) == 0: return _np.empty(0, complex if self._evotype == "statevec" else 'd')
#NOTE: moved a fast version of todense to replib - could use that if need a fast todense call...
if self._prep_or_effect == "prep":
ret = self.factors[0].todense() # factors are just other SPAMVecs
for i in range(1, len(self.factors)):
ret = _np.kron(ret, self.factors[i].todense())
else:
factorPOVMs = self.factors
ret = factorPOVMs[0][self.effectLbls[0]].todense()
for i in range(1, len(factorPOVMs)):
ret = _np.kron(ret, factorPOVMs[i][self.effectLbls[i]].todense())
return ret
elif self._evotype == "stabilizer":
if self._prep_or_effect == "prep":
# => self.factors should all be StabilizerSPAMVec objs
#Return stabilizer-rep tuple, just like StabilizerSPAMVec
sframe_factors = [f.todense() for f in self.factors]
return _stabilizer.sframe_kronecker(sframe_factors)
else: # self._prep_or_effect == "effect", so each factor is a StabilizerEffectVec
raise ValueError("Cannot convert Stabilizer tensor product effect to an array!")
# should be using effect.outcomes property...
else: # self._evotype in ("svterm","cterm")
raise NotImplementedError("todense() not implemented for %s evolution type" % self._evotype)
#def torep(self, typ, outrep=None):
# """
# Return a "representation" object for this SPAM vector.
#
# Such objects are primarily used internally by pyGSTi to compute
# things like probabilities more efficiently.
#
# Parameters
# ----------
# typ : {'prep','effect'}
# The type of representation (for cases when the vector type is
# not already defined).
#
# outrep : StateRep
# If not None, an existing state representation appropriate to this
# SPAM vector that may be used instead of allocating a new one.
#
# Returns
# -------
# StateRep
# """
# assert(len(self.factors) > 0), "Cannot get representation of a TensorProdSPAMVec with no factors!"
# assert(self._prep_or_effect in ('prep', 'effect')), "Invalid internal type: %s!" % self._prep_or_effect
#
# #FUTURE: use outrep as scratch for rep constructor?
# if self._evotype == "statevec":
# if self._prep_or_effect == "prep": # prep-type vectors can be represented as dense effects too
# if typ == "prep":
# return replib.SVStateRep(self.todense())
# else:
# return replib.SVEffectRep_Dense(self.todense())
# else:
# return replib.SVEffectRep_TensorProd(self._fast_kron_array, self._fast_kron_factordims,
# len(self.factors), self._fast_kron_array.shape[1], self.dim)
# elif self._evotype == "densitymx":
# if self._prep_or_effect == "prep":
# if typ == "prep":
# return replib.DMStateRep(self.todense())
# else:
# return replib.DMEffectRep_Dense(self.todense())
#
# else:
# return replib.DMEffectRep_TensorProd(self._fast_kron_array, self._fast_kron_factordims,
# len(self.factors), self._fast_kron_array.shape[1], self.dim)
#
# elif self._evotype == "stabilizer":
# if self._prep_or_effect == "prep":
# # prep-type vectors can be represented as dense effects too; this just means that self.factors
# if typ == "prep":
# # => self.factors should all be StabilizerSPAMVec objs
# #Return stabilizer-rep tuple, just like StabilizerSPAMVec
# sframe_factors = [f.todense() for f in self.factors] # StabilizerFrame for each factor
# return _stabilizer.sframe_kronecker(sframe_factors).torep()
# else: # self._prep_or_effect == "effect", so each factor is a StabilizerEffectVec
# outcomes = _np.array(list(_itertools.chain(*[f.outcomes for f in self.factors])), _np.int64)
# return replib.SBEffectRep(outcomes)
#
# else: # self._prep_or_effect == "effect", so each factor is a StabilizerZPOVM
# # like above, but get a StabilizerEffectVec from each StabilizerZPOVM factor
# factorPOVMs = self.factors
# factorVecs = [factorPOVMs[i][self.effectLbls[i]] for i in range(1, len(factorPOVMs))]
# outcomes = _np.array(list(_itertools.chain(*[f.outcomes for f in factorVecs])), _np.int64)
# return replib.SBEffectRep(outcomes)
#
# #OLD - now can remove outcomes prop?
# #raise ValueError("Cannot convert Stabilizer tensor product effect to a representation!")
# # should be using effect.outcomes property...
#
# else: # self._evotype in ("svterm","cterm")
# raise NotImplementedError("torep() not implemented for %s evolution type" % self._evotype)
def get_taylor_order_terms(self, order, max_poly_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this SPAM vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by, or a POVM effect preceded by, actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
SPAMVec's parameters, where the polynomial's variable indices index the
*global* parameters of the SPAMVec's parent (usually a :class:`Model`)
, not the SPAMVec's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
from .operation import EmbeddedOp as _EmbeddedGateMap
terms = []
fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor
# assumes density matrix evolution
total_nQ = sum(fnq) # total number of qubits
for p in _lt.partition_into(order, len(self.factors)):
if self._prep_or_effect == "prep":
factor_lists = [self.factors[i].get_taylor_order_terms(pi, max_poly_vars) for i, pi in enumerate(p)]
else:
factorPOVMs = self.factors
factor_lists = [factorPOVMs[i][Elbl].get_taylor_order_terms(pi, max_poly_vars)
for i, (pi, Elbl) in enumerate(zip(p, self.effectLbls))]
# When possible, create COLLAPSED factor_lists so each factor has just a single
# (SPAMVec) pre & post op, which can be formed into the new terms'
# TensorProdSPAMVec ops.
# - DON'T collapse stabilizer states & clifford ops - can't for POVMs
collapsible = False # bool(self._evotype =="svterm") # need to use reps for collapsing now... TODO?
if collapsible:
factor_lists = [[t.collapse_vec() for t in fterms] for fterms in factor_lists]
for factors in _itertools.product(*factor_lists):
# create a term with a TensorProdSPAMVec - Note we always create
# "prep"-mode vectors, since even when self._prep_or_effect == "effect" these
# vectors are created with factor (prep- or effect-type) SPAMVecs not factor POVMs
# we workaround this by still allowing such "prep"-mode
# TensorProdSPAMVecs to be represented as effects (i.e. in torep('effect'...) works)
coeff = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pre_op = TensorProdSPAMVec("prep", [f.pre_ops[0] for f in factors
if (f.pre_ops[0] is not None)])
post_op = TensorProdSPAMVec("prep", [f.post_ops[0] for f in factors
if (f.post_ops[0] is not None)])
term = _term.RankOnePolyPrepTerm.simple_init(coeff, pre_op, post_op, self._evotype)
if not collapsible: # then may need to add more ops. Assume factor ops are clifford gates
# Embed each factor's ops according to their target qubit(s) and just daisy-chain them
stateSpaceLabels = tuple(range(total_nQ)); curQ = 0
for f, nq in zip(factors, fnq):
targetLabels = tuple(range(curQ, curQ + nq)); curQ += nq
term.pre_ops.extend([_EmbeddedGateMap(stateSpaceLabels, targetLabels, op)
for op in f.pre_ops[1:]]) # embed and add ops
term.post_ops.extend([_EmbeddedGateMap(stateSpaceLabels, targetLabels, op)
for op in f.post_ops[1:]]) # embed and add ops
terms.append(term)
if return_coeff_polys:
def _decompose_indices(x):
return tuple(_modelmember._decompose_gpindices(
self.gpindices, _np.array(x, _np.int64)))
poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices
tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs]
if len(tapes) > 0:
vtape = _np.concatenate([t[0] for t in tapes])
ctape = _np.concatenate([t[1] for t in tapes])
else:
vtape = _np.empty(0, _np.int64)
ctape = _np.empty(0, complex)
coeffs_as_compact_polys = (vtape, ctape)
#self.local_term_poly_coeffs[order] = coeffs_as_compact_polys #FUTURE?
return terms, coeffs_as_compact_polys
else:
return terms # Cache terms in FUTURE?
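# Illustrative note: `_lt.partition_into(order, nFactors)` enumerates how the
# total Taylor order can be split among the factors -- e.g. order 2 over two
# factors gives the splits (2,0), (1,1) and (0,2) (in some order) -- and each
# split contributes the products of its factors' term lists to `terms`.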
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return self.Np
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
if self._prep_or_effect == "prep":
return _np.concatenate([fct.to_vector() for fct in self.factors], axis=0)
else:
raise ValueError(("'`to_vector` should not be called on effect-like"
" TensorProdSPAMVecs (instead it should be called"
" on the POVM)"))
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the SPAM vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of SPAM vector parameters. Length
must == num_params()
Returns
-------
None
"""
if self._prep_or_effect == "prep":
for sv in self.factors:
sv.from_vector(v[sv.gpindices], close, nodirty) # factors hold local indices
elif all([self.effectLbls[i] == list(povm.keys())[0]
for i, povm in enumerate(self.factors)]):
#then this is the *first* vector in the larger TensorProdPOVM
# and we should initialize all of the factorPOVMs
for povm in self.factors:
local_inds = _modelmember._decompose_gpindices(
self.gpindices, povm.gpindices)
povm.from_vector(v[local_inds], close, nodirty)
#Update representation, which may be a dense matrix or
# just fast-kron arrays or a stabilizer state.
self._update_rep()
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
An empty 2D array in the StaticSPAMVec case (num_params == 0).
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
assert(self._evotype in ("statevec", "densitymx"))
typ = complex if self._evotype == "statevec" else 'd'
derivMx = _np.zeros((self.dim, self.num_params()), typ)
#Product rule to compute jacobian
for i, fct in enumerate(self.factors): # loop over the spamvec/povm we differentiate wrt
vec = fct if (self._prep_or_effect == "prep") else fct[self.effectLbls[i]]
if vec.num_params() == 0: continue # no contribution
deriv = vec.deriv_wrt_params(None) # TODO: use filter?? / make relative to this gate...
deriv.shape = (vec.dim, vec.num_params())
if i > 0: # factors before ith
if self._prep_or_effect == "prep":
pre = self.factors[0].todense()
for vecA in self.factors[1:i]:
pre = _np.kron(pre, vecA.todense())
else:
pre = self.factors[0][self.effectLbls[0]].todense()
for j, fctA in enumerate(self.factors[1:i], start=1):
pre = _np.kron(pre, fctA[self.effectLbls[j]].todense())
deriv = _np.kron(pre[:, None], deriv) # add a dummy 1-dim to 'pre' and do kron properly...
if i + 1 < len(self.factors): # factors after ith
if self._prep_or_effect == "prep":
post = self.factors[i + 1].todense()
for vecA in self.factors[i + 2:]:
post = _np.kron(post, vecA.todense())
else:
post = self.factors[i + 1][self.effectLbls[i + 1]].todense()
for j, fctA in enumerate(self.factors[i + 2:], start=i + 2):
post = _np.kron(post, fctA[self.effectLbls[j]].todense())
deriv = _np.kron(deriv, post[:, None]) # add a dummy 1-dim to 'post' and do kron properly...
if self._prep_or_effect == "prep":
local_inds = fct.gpindices # factor vectors hold local indices
else: # in effect case, POVM-factors hold global indices (b/c they're meant to be shareable)
local_inds = _modelmember._decompose_gpindices(
self.gpindices, fct.gpindices)
assert(local_inds is not None), \
"Error: gpindices has not been initialized for factor %d - cannot compute derivative!" % i
derivMx[:, local_inds] += deriv
derivMx.shape = (self.dim, self.num_params()) # necessary?
if wrtFilter is None:
return derivMx
else:
return _np.take(derivMx, wrtFilter, axis=1)
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
return False
def __str__(self):
s = "Tensor product %s vector with length %d\n" % (self._prep_or_effect, self.dim)
#ar = self.todense()
#s += _mt.mx_to_string(ar, width=4, prec=2)
if self._prep_or_effect == "prep":
# factors are just other SPAMVecs
s += " x ".join([_mt.mx_to_string(fct.todense(), width=4, prec=2) for fct in self.factors])
else:
# factors are POVMs
s += " x ".join([_mt.mx_to_string(fct[self.effectLbls[i]].todense(), width=4, prec=2)
for i, fct in enumerate(self.factors)])
return s
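# Example (minimal sketch): a "prep"-type product is dense-equal to the
# Kronecker product of its factors, so two 1-qubit (dim-4) factors give a
# 2-qubit (dim-16) vector:
#
#   >>> import numpy as np
#   >>> f = StaticSPAMVec([1 / np.sqrt(2), 0., 0., 1 / np.sqrt(2)])
#   >>> prod = TensorProdSPAMVec("prep", [f, f])
#   >>> prod.dim
#   16
#   >>> np.allclose(prod.todense(), np.kron(f.todense(), f.todense()))
#   True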
class PureStateSPAMVec(SPAMVec):
"""
Encapsulates a SPAM vector that is a pure state but evolves according to
one of the density matrix evolution types ("densitymx", "svterm", and
"cterm"). It is parameterized by a contained pure-state SPAMVec which
evolves according to a state vector evolution type ("statevec" or
"stabilizer").
"""
def __init__(self, pure_state_vec, evotype='densitymx', dm_basis='pp', typ="prep"):
"""
Initialize a PureStateSPAMVec object.
Parameters
----------
pure_state_vec : array_like or SPAMVec
a 1D numpy array or object representing the pure state. This object
sets the parameterization and dimension of this SPAM vector (if
`pure_state_vec`'s dimension is `d`, then this SPAM vector's
dimension is `d^2`). Assumed to be a complex vector in the
standard computational basis.
evotype : {'densitymx', 'svterm', 'cterm'}
The evolution type of this SPAMVec. Note that the evotype of
`pure_state_vec` must be compatible with this value. In particular,
`pure_state_vec` must have an evotype of `"statevec"` (then allowed
values are `"densitymx"` and `"svterm"`) or `"stabilizer"` (then
the only allowed value is `"cterm"`).
dm_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis for this SPAM vector - that is, for the *density matrix*
corresponding to `pure_state_vec`. Allowed values are Matrix-unit
(std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
"""
if not isinstance(pure_state_vec, SPAMVec):
pure_state_vec = StaticSPAMVec(SPAMVec.convert_to_vector(pure_state_vec), 'statevec')
self.pure_state_vec = pure_state_vec
self.basis = dm_basis # only used for dense conversion
pure_evo = pure_state_vec._evotype
if pure_evo == "statevec":
if evotype not in ("densitymx", "svterm"):
raise ValueError(("`evotype` arg must be 'densitymx' or 'svterm'"
" when `pure_state_vec` evotype is 'statevec'"))
elif pure_evo == "stabilizer":
if evotype not in ("cterm",):
raise ValueError(("`evotype` arg must be 'cterm' when"
" `pure_state_vec` evotype is 'stabilizer'"))
else:
raise ValueError("`pure_state_vec` evotype must be 'statevec' or 'stabilizer' (not '%s')" % pure_evo)
dim = self.pure_state_vec.dim**2
rep = dim # no representation yet... maybe this should be a dense vector
# (in "densitymx" evotype case -- use todense)? TODO
#Create representation
SPAMVec.__init__(self, rep, evotype, typ)
def todense(self, scratch=None):
"""
Return this SPAM vector as a (dense) numpy array. The memory
in `scratch` may be used when it is not None.
"""
dmVec_std = _gt.state_to_dmvec(self.pure_state_vec.todense())
return _bt.change_basis(dmVec_std, 'std', self.basis)
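# Example (worked): the pure state |0> = (1, 0) has density matrix
# |0><0| = (I + Z)/2, whose Pauli-product Liouville vector is
# [1/sqrt(2), 0, 0, 1/sqrt(2)]:
#
#   >>> PureStateSPAMVec([1., 0.], 'densitymx', 'pp').todense()
#   ... # ~ array([0.7071, 0., 0., 0.7071])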
def get_taylor_order_terms(self, order, max_poly_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this SPAM vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by, or a POVM effect preceded by, actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
SPAMVec's parameters, where the polynomial's variable indices index the
*global* parameters of the SPAMVec's parent (usually a :class:`Model`)
, not the SPAMVec's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
if self.num_params() > 0:
raise ValueError(("PureStateSPAMVec.get_taylor_order_terms(...) is only "
"implemented for the case when its underlying "
"pure state vector has 0 parameters (is static)"))
if order == 0: # only 0-th order term exists (assumes static pure_state_vec)
purevec = self.pure_state_vec
coeff = _Polynomial({(): 1.0}, max_poly_vars)
if self._prep_or_effect == "prep":
terms = [_term.RankOnePolyPrepTerm.simple_init(coeff, purevec, purevec, self._evotype)]
else:
terms = [_term.RankOnePolyEffectTerm.simple_init(coeff, purevec, purevec, self._evotype)]
if return_coeff_polys:
coeffs_as_compact_polys = coeff.compact(complex_coeff_tape=True)
return terms, coeffs_as_compact_polys
else:
return terms
else:
if return_coeff_polys:
vtape = _np.empty(0, _np.int64)
ctape = _np.empty(0, complex)
return [], (vtape, ctape)
else:
return []
#TODO REMOVE
#def get_direct_order_terms(self, order, base_order):
# """
# Parameters
# ----------
# order : int
# The order of terms to get.
#
# Returns
# -------
# list
# A list of :class:`RankOneTerm` objects.
# """
# if self.num_params() > 0:
# raise ValueError(("PureStateSPAMVec.get_taylor_order_terms(...) is only "
# "implemented for the case when its underlying "
# "pure state vector has 0 parameters (is static)"))
#
# if order == 0: # only 0-th order term exists (assumes static pure_state_vec)
# if self._evotype == "svterm": tt = "dense"
# elif self._evotype == "cterm": tt = "clifford"
# else: raise ValueError("Invalid evolution type %s for calling `get_taylor_order_terms`" % self._evotype)
#
# purevec = self.pure_state_vec
# terms = [ _term.RankOneTerm(1.0, purevec, purevec, tt) ]
# else:
# terms = []
# return terms
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return self.pure_state_vec.num_params()
def to_vector(self):
"""
Get the SPAM vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return self.pure_state_vec.to_vector()
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the SPAM vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of SPAM vector parameters. Length
must == num_params()
Returns
-------
None
"""
self.pure_state_vec.from_vector(v, close, nodirty)
#Update dense rep if one is created (TODO)
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
An empty 2D array in the StaticSPAMVec case (num_params == 0).
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
raise NotImplementedError("Still need to work out derivative calculation of PureStateSPAMVec")
def has_nonzero_hessian(self):
"""
Returns whether this SPAM vector has a non-zero Hessian with
respect to its parameters, i.e. whether it only depends
linearly on its parameters or not.
Returns
-------
bool
"""
return self.pure_state_vec.has_nonzero_hessian()
def __str__(self):
s = "Pure-state spam vector with length %d holding:\n" % self.dim
s += " " + str(self.pure_state_vec)
return s
class LindbladSPAMVec(SPAMVec):
""" A Lindblad-parameterized SPAMVec (that is also expandable into terms)"""
@classmethod
def from_spamvec_obj(cls, spamvec, typ, paramType="GLND", purevec=None,
proj_basis="pp", mxBasis="pp", truncate=True,
lazy=False):
"""
Creates a LindbladSPAMVec from an existing SPAMVec object
and some additional information.
This function is different from `from_spam_vector` in that it assumes
that `spamvec` is a :class:`SPAMVec`-derived object, and if `lazy=True`
and if `spamvec` is already a matching LindbladSPAMVec, it
is returned directly. This routine is primarily used in spam vector
conversion functions, where conversion is desired only when necessary.
Parameters
----------
spamvec : SPAMVec
The spam vector object to "convert" to a
`LindbladSPAMVec`.
typ : {"prep","effect"}
Whether this is a state preparation or POVM effect vector.
paramType : str, optional
The high-level "parameter type" of the gate to create. This
specifies both which Lindblad parameters are included and what
type of evolution is used. Examples of valid values are
`"CPTP"`, `"H+S"`, `"S terms"`, and `"GLND clifford terms"`.
purevec : numpy array or SPAMVec object, optional
A SPAM vector which represents a pure-state, taken as the "ideal"
reference state when constructing the error generator of the
returned `LindbladSPAMVec`. Note that this vector
still acts on density matrices (if it's a SPAMVec it should have
a "densitymx", "svterm", or "cterm" evolution type, and if it's
a numpy array it should have the same dimension as `spamvec`).
If None, then it is taken to be `spamvec`, and so `spamvec` must
represent a pure state in this case.
proj_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct the Lindblad-term error generators onto
which the SPAM vector's error generator is projected. Allowed values
are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt), list of numpy arrays, or a custom basis object.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
truncate : bool, optional
Whether to truncate the projections onto the Lindblad terms in
order to meet constraints (e.g. to preserve CPTP) when necessary.
If False, then an error is thrown when the given `spamvec` cannot
be realized by the specified set of Lindblad projections.
lazy : bool, optional
If True, then if `spamvec` is already a LindbladSPAMVec
with the requested details (given by the other arguments), then
`spamvec` is returned directly and no conversion/copying is
performed. If False, then a new object is always returned.
Returns
-------
LindbladSPAMVec
"""
if not isinstance(spamvec, SPAMVec):
spamvec = StaticSPAMVec(spamvec, typ=typ) # assume spamvec is just a vector
if purevec is None:
purevec = spamvec # right now, we don't try to extract a "closest pure vec"
# to spamvec - below will fail if spamvec isn't pure.
elif not isinstance(purevec, SPAMVec):
purevec = StaticSPAMVec(purevec, typ=typ) # assume spamvec is just a vector
#Break paramType in to a "base" type and an evotype
from .operation import LindbladOp as _LPGMap
bTyp, evotype, nonham_mode, param_mode = _LPGMap.decomp_paramtype(paramType)
ham_basis = proj_basis if (("H+" in bTyp) or bTyp in ("CPTP", "GLND")) else None
nonham_basis = proj_basis
def beq(b1, b2):
""" Check if bases have equal names """
b1 = b1.name if isinstance(b1, _Basis) else b1
b2 = b2.name if isinstance(b2, _Basis) else b2
return b1 == b2
def normeq(a, b):
if a is None and b is None: return True
if a is None or b is None: return False
return _mt.safenorm(a - b) < 1e-6 # what about possibility of Clifford gates?
if isinstance(spamvec, LindbladSPAMVec) \
and spamvec._evotype == evotype and spamvec.typ == typ \
and beq(ham_basis, spamvec.error_map.ham_basis) and beq(nonham_basis, spamvec.error_map.other_basis) \
and param_mode == spamvec.error_map.param_mode and nonham_mode == spamvec.error_map.nonham_mode \
and beq(mxBasis, spamvec.error_map.matrix_basis) and lazy:
#normeq(gate.pure_state_vec,purevec) \ # TODO: more checks for equality?!
return spamvec # no creation necessary!
else:
#Convert vectors (if possible) to SPAMVecs
# of the appropriate evotype and 0 params.
bDiff = spamvec is not purevec
spamvec = _convert_to_lindblad_base(spamvec, typ, evotype, mxBasis)
purevec = _convert_to_lindblad_base(purevec, typ, evotype, mxBasis) if bDiff else spamvec
assert(spamvec._evotype == evotype)
assert(purevec._evotype == evotype)
return cls.from_spam_vector(
spamvec, purevec, typ, ham_basis, nonham_basis,
param_mode, nonham_mode, truncate, mxBasis, evotype)
@classmethod
def from_spam_vector(cls, spamVec, pureVec, typ,
ham_basis="pp", nonham_basis="pp", param_mode="cptp",
nonham_mode="all", truncate=True, mxBasis="pp",
evotype="densitymx"):
"""
Creates a Lindblad-parameterized spamvec from a state vector and a basis
which specifies how to decompose (project) the vector's error generator.
Parameters
----------
spamVec : SPAMVec
the SPAM vector to initialize from. The error generator that
transforms `pureVec` into `spamVec` forms the parameterization
of the returned LindbladSPAMVec.
pureVec : numpy array or SPAMVec
An array or SPAMVec in the *full* density-matrix space (this
vector will have the same dimension as `spamVec` - 4 in the case
of a single qubit) which represents a pure-state preparation or
projection. This is used as the "base" preparation/projection
when computing the error generator that will be parameterized.
Note that this argument must be specified, as there is no natural
default value (like the identity in the case of gates).
typ : {"prep","effect"}
Whether this is a state preparation or POVM effect vector.
ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct the Hamiltonian-type Lindblad error generators.
Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt), list of numpy arrays, or a custom basis object.
nonham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct the Stochastic-type Lindblad error generators.
Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt), list of numpy arrays, or a custom basis object.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Describes how the Lindblad coefficients/projections relate to the
SPAM vector's parameter values. Allowed values are:
`"unconstrained"` (coeffs are independent unconstrained parameters),
`"cptp"` (independent parameters but constrained so map is CPTP),
`"reldepol"` (all non-Ham. diagonal coeffs take the *same* value),
`"depol"` (same as `"reldepol"` but coeffs must be *positive*)
nonham_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad projections are potentially non-zero.
Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.),
`"diag_affine"` (diagonal coefficients + affine projections), and
`"all"` (the entire matrix of coefficients is allowed).
truncate : bool, optional
Whether to truncate the projections onto the Lindblad terms in
order to meet constraints (e.g. to preserve CPTP) when necessary.
If False, then an error is thrown when the given `gate` cannot
be realized by the specified set of Lindblad projections.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
evotype : {"densitymx","svterm","cterm"}
The evolution type of the spamvec being constructed. `"densitymx"` is
the usual Liouville density-matrix-vector propagation via matrix-vector
products. `"svterm"` denotes state-vector term-based evolution
(the spamvec is obtained by evaluating the rank-1 terms up to
some order). `"cterm"` is similar but uses stabilizer states.
Returns
-------
LindbladSPAMVec
"""
#Compute a (errgen, pureVec) pair from the given
# (spamVec, pureVec) pair.
assert(pureVec is not None), "Must supply `pureVec`!" # since there's no good default?
if not isinstance(spamVec, SPAMVec):
spamVec = StaticSPAMVec(spamVec, evotype, typ) # assume spamvec is just a vector
if not isinstance(pureVec, SPAMVec):
pureVec = StaticSPAMVec(pureVec, evotype, typ) # assume spamvec is just a vector
d2 = pureVec.dim
#Determine whether we're using sparse bases or not
sparse = None
if ham_basis is not None:
if isinstance(ham_basis, _Basis): sparse = ham_basis.sparse
elif not isinstance(ham_basis, str) and len(ham_basis) > 0:
sparse = _sps.issparse(ham_basis[0])
if sparse is None and nonham_basis is not None:
if isinstance(nonham_basis, _Basis): sparse = nonham_basis.sparse
elif not isinstance(nonham_basis, str) and len(nonham_basis) > 0:
sparse = _sps.issparse(nonham_basis[0])
if sparse is None: sparse = False # the default
if spamVec is None or spamVec is pureVec:
if sparse: errgen = _sps.csr_matrix((d2, d2), dtype='d')
else: errgen = _np.zeros((d2, d2), 'd')
else:
#Construct "spam error generator" by comparing *dense* vectors
pvdense = pureVec.todense()
svdense = spamVec.todense()
errgen = _gt.spam_error_generator(svdense, pvdense, mxBasis)
if sparse: errgen = _sps.csr_matrix(errgen)
assert(pureVec._evotype == evotype), "`pureVec` must have evotype == '%s'" % evotype
from .operation import LindbladErrorgen as _LErrorgen
from .operation import LindbladOp as _LPGMap
from .operation import LindbladDenseOp as _LPOp
errgen = _LErrorgen.from_error_generator(errgen, ham_basis,
nonham_basis, param_mode, nonham_mode,
mxBasis, truncate, evotype)
errcls = _LPOp if (pureVec.dim <= 64 and evotype == "densitymx") else _LPGMap
errmap = errcls(None, errgen)
return cls(pureVec, errmap, typ)
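# A minimal usage sketch (the vector values are hypothetical, for illustration
# only): build a Lindblad-parameterized prep vector from a slightly depolarized
# |0> state of one qubit, with both vectors in the Pauli-product ("pp") basis:
#
#   ideal = 1 / _np.sqrt(2) * _np.array([1, 0, 0, 1], 'd')    # pure |0><0|
#   noisy = 1 / _np.sqrt(2) * _np.array([1, 0, 0, 0.9], 'd')  # shrunken Z part
#   prep = LindbladSPAMVec.from_spam_vector(noisy, ideal, "prep")
#
# The returned object parameterizes the error generator taking `ideal` to `noisy`.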
@classmethod
def from_lindblad_terms(cls, pureVec, Ltermdict, typ, basisdict=None,
param_mode="cptp", nonham_mode="all", truncate=True,
mxBasis="pp", evotype="densitymx"):
"""
Create a Lindblad-parameterized spamvec with a given set of Lindblad terms.
Parameters
----------
pureVec : numpy array or SPAMVec
An array or SPAMVec in the *full* density-matrix space (this
vector will have dimension 4 in the case of a single qubit) which
represents a pure-state preparation or projection. This is used as
the "base" preparation or projection that is followed or preceded
by, respectively, the parameterized Lindblad-form error generator.
Ltermdict : dict
A dictionary specifying which Lindblad terms are present in the gate
parameterization. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
(Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always
have a single basis label (so key is a 2-tuple) whereas Stochastic
tuples with 1 basis label indicate a *diagonal* term, and are the
only types of terms allowed when `nonham_mode != "all"`. Otherwise,
Stochastic term tuples can include 2 basis labels to specify
"off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be
strings or integers. Values are complex coefficients (error rates).
typ : {"prep","effect"}
Whether this is a state preparation or POVM effect vector.
basisdict : dict, optional
A dictionary mapping the basis labels (strings or ints) used in the
keys of `Ltermdict` to basis matrices (numpy arrays or Scipy sparse
matrices).
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Describes how the Lindblad coefficients/projections relate to the
SPAM vector's parameter values. Allowed values are:
`"unconstrained"` (coeffs are independent unconstrained parameters),
`"cptp"` (independent parameters but constrained so map is CPTP),
`"reldepol"` (all non-Ham. diagonal coeffs take the *same* value),
`"depol"` (same as `"reldepol"` but coeffs must be *positive*)
nonham_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad projections are potentially non-zero.
Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.),
`"diag_affine"` (diagonal coefficients + affine projections), and
`"all"` (the entire matrix of coefficients is allowed).
truncate : bool, optional
Whether to truncate the projections onto the Lindblad terms in
order to meet constraints (e.g. to preserve CPTP) when necessary.
If False, then an error is thrown when the given dictionary of
Lindblad terms doesn't conform to the constraints.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
evotype : {"densitymx","svterm","cterm"}
The evolution type of the spamvec being constructed. `"densitymx"` is
the usual Liouville density-matrix-vector propagation via matrix-vector
products. `"svterm"` denotes state-vector term-based evolution
(the spamvec is obtained by evaluating the rank-1 terms up to
some order). `"cterm"` is similar but uses stabilizer states.
Returns
-------
LindbladOp
"""
#Need a dimension for error map construction (basisdict could be completely empty)
if not isinstance(pureVec, SPAMVec):
pureVec = StaticSPAMVec(pureVec, evotype, typ) # assume spamvec is just a vector
d2 = pureVec.dim
from .operation import LindbladOp as _LPGMap
errmap = _LPGMap(d2, Ltermdict, basisdict, param_mode, nonham_mode,
truncate, mxBasis, evotype)
return cls(pureVec, errmap, typ)
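# A minimal sketch of the `Ltermdict` format described above (the coefficient
# values are hypothetical):
#
#   Ltermdict = {('H', 'X'): 0.01,       # Hamiltonian term: one basis label
#                ('S', 'X'): 0.001,      # diagonal stochastic term
#                ('S', 'X', 'Y'): 1e-4}  # off-diagonal term; needs nonham_mode="all"
#   prep = LindbladSPAMVec.from_lindblad_terms(pureVec, Ltermdict, "prep")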
def __init__(self, pureVec, errormap, typ):
"""
Initialize a LindbladSPAMVec object.
Essentially a pure state preparation or projection that is followed
or preceded by, respectively, the action of LindbladDenseOp.
Parameters
----------
pureVec : numpy array or SPAMVec
An array or SPAMVec in the *full* density-matrix space (this
vector will have dimension 4 in the case of a single qubit) which
represents a pure-state preparation or projection. This is used as
the "base" preparation or projection that is followed or preceded
by, respectively, the parameterized Lindblad-form error generator.
(This argument is *not* copied if it is a SPAMVec. A numpy array
is converted to a new StaticSPAMVec.)
errormap : MapOperator
The error generator action and parameterization, encapsulated in
a gate object. Usually a :class:`LindbladOp`
or :class:`ComposedOp` object. (This argument is *not* copied,
to allow LindbladSPAMVecs to share error generator
parameters with other gates and spam vectors.)
typ : {"prep","effect"}
Whether this is a state preparation or POVM effect vector.
"""
from .operation import LindbladOp as _LPGMap
evotype = errormap._evotype
assert(evotype in ("densitymx", "svterm", "cterm")), \
"Invalid evotype: %s for %s" % (evotype, self.__class__.__name__)
if not isinstance(pureVec, SPAMVec):
pureVec = StaticSPAMVec(pureVec, evotype, typ) # assume spamvec is just a vector
assert(pureVec._evotype == evotype), \
"`pureVec` evotype must match `errormap` ('%s' != '%s')" % (pureVec._evotype, evotype)
assert(pureVec.num_params() == 0), "`pureVec` 'reference' must have *zero* parameters!"
d2 = pureVec.dim
self.state_vec = pureVec
self.error_map = errormap
self.terms = {} if evotype in ("svterm", "cterm") else None
self.local_term_poly_coeffs = {} if evotype in ("svterm", "cterm") else None
# TODO REMOVE self.direct_terms = {} if evotype in ("svterm","cterm") else None
# TODO REMOVE self.direct_term_poly_coeffs = {} if evotype in ("svterm","cterm") else None
#Create representation
if evotype == "densitymx":
assert(self.state_vec._prep_or_effect == typ), "LindbladSPAMVec prep/effect mismatch with given statevec!"
if typ == "prep":
dmRep = self.state_vec._rep
errmapRep = self.error_map._rep
rep = errmapRep.acton(dmRep) # FUTURE: do this acton in place somehow? (like C-reps do)
#maybe make a special _Errgen *state* rep?
else: # effect
dmEffectRep = self.state_vec._rep
errmapRep = self.error_map._rep
rep = replib.DMEffectRep_Errgen(errmapRep, dmEffectRep, id(self.error_map))
# an effect that applies a *named* errormap before computing with dmEffectRep
else:
rep = d2 # no representations for term-based evotypes
SPAMVec.__init__(self, rep, evotype, typ) # sets self.dim
def _update_rep(self):
if self._evotype == "densitymx":
if self._prep_or_effect == "prep":
# _rep is a DMStateRep
dmRep = self.state_vec._rep
errmapRep = self.error_map._rep
self._rep.base[:] = errmapRep.acton(dmRep).base[:] # copy from "new_rep"
else: # effect
# _rep is a DMEffectRep_Errgen, which just holds references to the
# effect and error map's representations (which we assume have been updated)
# so there's no need to update anything here
pass
def submembers(self):
"""
Get the ModelMember-derived objects contained in this one.
Returns
-------
list
"""
return [self.error_map]
def copy(self, parent=None):
"""
Copy this object.
Returns
-------
LinearOperator
A copy of this object.
"""
# We need to override this method so that embedded gate has its
# parent reset correctly.
cls = self.__class__ # so that this method works for derived classes too
copyOfMe = cls(self.state_vec, self.error_map.copy(parent), self._prep_or_effect)
return self._copy_gpindices(copyOfMe, parent)
def set_gpindices(self, gpindices, parent, memo=None):
"""
Set the parent and indices into the parent's parameter vector that
are used by this ModelMember object.
Parameters
----------
gpindices : slice or integer ndarray
The indices of this objects parameters in its parent's array.
parent : Model or ModelMember
The parent whose parameter array gpindices references.
Returns
-------
None
"""
self.terms = {} # clear terms cache since param indices have changed now
self.local_term_poly_coeffs = {}
# TODO REMOVE self.direct_terms = {}
# TODO REMOVE self.direct_term_poly_coeffs = {}
_modelmember.ModelMember.set_gpindices(self, gpindices, parent, memo)
def todense(self, scratch=None):
"""
Return this SPAM vector as a (dense) numpy array. The memory
in `scratch` may be used when it is not None.
"""
if self._prep_or_effect == "prep":
#error map acts on dmVec
return _np.dot(self.error_map.todense(), self.state_vec.todense())
else:
#Note: if this is an effect vector, self.error_map is the
# map that acts on the *state* vector before dmVec acts
# as an effect: E.T -> dot(E.T,errmap) ==> E -> dot(errmap.T,E)
return _np.dot(self.error_map.todense().conjugate().T, self.state_vec.todense())
#def torep(self, typ, outvec=None):
# """
# Return a "representation" object for this SPAMVec.
#
# Such objects are primarily used internally by pyGSTi to compute
# things like probabilities more efficiently.
#
# Returns
# -------
# StateRep
# """
# if self._evotype == "densitymx":
#
# if typ == "prep":
# dmRep = self.state_vec.torep(typ)
# errmapRep = self.error_map.torep()
# return errmapRep.acton(dmRep) # FUTURE: do this acton in place somehow? (like C-reps do)
# #maybe make a special _Errgen *state* rep?
#
# else: # effect
# dmEffectRep = self.state_vec.torep(typ)
# errmapRep = self.error_map.torep()
# return replib.DMEffectRep_Errgen(errmapRep, dmEffectRep, id(self.error_map))
# # an effect that applies a *named* errormap before computing with dmEffectRep
#
# else:
# #framework should not be calling "torep" on states w/a term-based evotype...
# # they should call torep on the *terms* given by get_taylor_order_terms(...)
# raise ValueError("Invalid evotype '%s' for %s.torep(...)" %
# (self._evotype, self.__class__.__name__))
def get_taylor_order_terms(self, order, max_poly_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this SPAM vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by or POVM effect preceded by actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
SPAMVec's parameters, where the polynomial's variable indices index the
*global* parameters of the SPAMVec's parent (usually a :class:`Model`)
, not the SPAMVec's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
if order not in self.terms:
if self._evotype not in ('svterm', 'cterm'):
raise ValueError("Invalid evolution type %s for calling `get_taylor_order_terms`" % self._evotype)
assert(self.gpindices is not None), "LindbladSPAMVec must be added to a Model before use!"
state_terms = self.state_vec.get_taylor_order_terms(0, max_poly_vars); assert(len(state_terms) == 1)
stateTerm = state_terms[0]
err_terms, cpolys = self.error_map.get_taylor_order_terms(order, max_poly_vars, True)
if self._prep_or_effect == "prep":
terms = [_term.compose_terms((stateTerm, t)) for t in err_terms] # t ops occur *after* stateTerm's
else: # "effect"
# Effect terms are special in that all their pre/post ops act in order on the *state* before the final
# effect is used to compute a probability. Thus, constructing the same "terms" as above works here
# too - the difference comes when this SPAMVec is used as an effect rather than a prep.
terms = [_term.compose_terms((stateTerm, t)) for t in err_terms] # t ops occur *after* stateTerm's
#OLD: now this is done within calculator when possible b/c not all terms can be collapsed
#terms = [ t.collapse() for t in terms ] # collapse terms for speed
# - resulting in terms with just a single pre/post op, each == a pure state
#assert(stateTerm.coeff == Polynomial_1.0) # TODO... so can assume local polys are same as for errorgen
self.local_term_poly_coeffs[order] = cpolys
self.terms[order] = terms
if return_coeff_polys:
return self.terms[order], self.local_term_poly_coeffs[order]
else:
return self.terms[order]
def get_taylor_order_terms_above_mag(self, order, max_poly_vars, min_term_mag):
state_terms = self.state_vec.get_taylor_order_terms(0, max_poly_vars); assert(len(state_terms) == 1)
stateTerm = state_terms[0]
stateTerm = stateTerm.copy_with_magnitude(1.0)
#assert(stateTerm.coeff == Polynomial_1.0) # TODO... so can assume local polys are same as for errorgen
err_terms = self.error_map.get_taylor_order_terms_above_mag(
order, max_poly_vars, min_term_mag / stateTerm.magnitude)
#This gives the appropriate logic, but *both* prep or effect results in *same* expression, so just run it:
#if self._prep_or_effect == "prep":
# terms = [_term.compose_terms((stateTerm, t)) for t in err_terms] # t ops occur *after* stateTerm's
#else: # "effect"
# # Effect terms are special in that all their pre/post ops act in order on the *state* before the final
# # effect is used to compute a probability. Thus, constructing the same "terms" as above works here
# # too - the difference comes when this SPAMVec is used as an effect rather than a prep.
# terms = [_term.compose_terms((stateTerm, t)) for t in err_terms] # t ops occur *after* stateTerm's
terms = [_term.compose_terms_with_mag((stateTerm, t), stateTerm.magnitude * t.magnitude)
for t in err_terms] # t ops occur *after* stateTerm's
return terms
def get_total_term_magnitude(self):
"""
Get the total (sum) of the magnitudes of all this SPAM vector's terms.
The magnitude of a term is the absolute value of its coefficient, so
this function returns the number you'd get from summing up the
absolute-coefficients of all the Taylor terms (at all orders!) you
get from expanding this SPAM vector in a Taylor series.
Returns
-------
float
"""
# return (sum of absvals of *all* term coeffs)
return self.error_map.get_total_term_magnitude() # error map is only part with terms
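# Worked example of the definition above (hypothetical coefficients): if the
# Taylor terms of this SPAM vector had coefficients 0.9, -0.05, and 0.02j, the
# total term magnitude would be |0.9| + |-0.05| + |0.02j| = 0.97.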
def get_total_term_magnitude_deriv(self):
"""
Get the derivative of the total (sum) of the magnitudes of all this
operator's terms with respect to the operators (local) parameters.
Returns
-------
numpy array
An array of length self.num_params()
"""
return self.error_map.get_total_term_magnitude_deriv()
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
dmVec = self.state_vec.todense()
derrgen = self.error_map.deriv_wrt_params(wrtFilter) # shape (dim*dim, nParams)
derrgen.shape = (self.dim, self.dim, derrgen.shape[1]) # => (dim,dim,nParams)
if self._prep_or_effect == "prep":
#derror map acts on dmVec
#return _np.einsum("ijk,j->ik", derrgen, dmVec) # return shape = (dim,nParams)
return _np.tensordot(derrgen, dmVec, (1, 0)) # return shape = (dim,nParams)
else:
# self.error_map acts on the *state* vector before dmVec acts
# as an effect: E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
#return _np.einsum("jik,j->ik", derrgen.conjugate(), dmVec) # return shape = (dim,nParams)
return _np.tensordot(derrgen.conjugate(), dmVec, (0, 0)) # return shape = (dim,nParams)
def hessian_wrt_params(self, wrtFilter1=None, wrtFilter2=None):
"""
Construct the Hessian of this SPAM vector with respect to its parameters.
This function returns a tensor whose first axis corresponds to the
flattened operation matrix and whose 2nd and 3rd axes correspond to the
parameters that are differentiated with respect to.
Parameters
----------
wrtFilter1, wrtFilter2 : list
Lists of indices of the parameters to take first and second
derivatives with respect to. If None, then derivatives are
taken with respect to all of the vector's parameters.
Returns
-------
numpy array
Hessian with shape (dimension, num_params1, num_params2)
"""
dmVec = self.state_vec.todense()
herrgen = self.error_map.hessian_wrt_params(wrtFilter1, wrtFilter2) # shape (dim*dim, nParams1, nParams2)
herrgen.shape = (self.dim, self.dim, herrgen.shape[1], herrgen.shape[2]) # => (dim,dim,nParams1, nParams2)
if self._prep_or_effect == "prep":
#derror map acts on dmVec
#return _np.einsum("ijkl,j->ikl", herrgen, dmVec) # return shape = (dim,nParams)
return _np.tensordot(herrgen, dmVec, (1, 0)) # return shape = (dim,nParams)
else:
# self.error_map acts on the *state* vector before dmVec acts
# as an effect: E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
#return _np.einsum("jikl,j->ikl", herrgen.conjugate(), dmVec) # return shape = (dim,nParams)
return _np.tensordot(herrgen.conjugate(), dmVec, (0, 0)) # return shape = (dim,nParams)
def num_params(self):
"""
Get the number of independent parameters which specify this SPAM vector.
Returns
-------
int
the number of independent parameters.
"""
return self.error_map.num_params()
def to_vector(self):
"""
Extract a vector of the underlying gate parameters from this gate.
Returns
-------
numpy array
a 1D numpy array with length == num_params().
"""
return self.error_map.to_vector()
def from_vector(self, v, close=False, nodirty=False):
"""
Initialize the gate using a vector of its parameters.
Parameters
----------
v : numpy array
The 1D vector of gate parameters. Length
must == num_params().
Returns
-------
None
"""
self.error_map.from_vector(v, close, nodirty)
self._update_rep()
if not nodirty: self.dirty = True
def transform(self, S, typ):
"""
Update SPAM (column) vector V as inv(S) * V or S^T * V for preparation
or effect SPAM vectors, respectively.
Note that this is equivalent to state preparation vectors getting
mapped: `rho -> inv(S) * rho` and the *transpose* of effect vectors
being mapped as `E^T -> E^T * S`.
Generally, the transform function updates the *parameters* of
the SPAM vector such that the resulting vector is altered as
described above. If such an update cannot be done (because
the gate parameters do not allow for it), ValueError is raised.
Parameters
----------
S : GaugeGroupElement
A gauge group element which specifies the "S" matrix
(and its inverse) used in the above similarity transform.
typ : { 'prep', 'effect' }
Which type of SPAM vector is being transformed (see above).
"""
#Defer everything to LindbladOp's
# `spam_transform` function, which applies either
# error_map -> inv(S) * error_map ("prep" case) OR
# error_map -> error_map * S ("effect" case)
self.error_map.spam_transform(S, typ)
self._update_rep()
self.dirty = True
def depolarize(self, amount):
"""
Depolarize this SPAM vector by the given `amount`.
Generally, the depolarize function updates the *parameters* of
the SPAMVec such that the resulting vector is depolarized. If
such an update cannot be done (because the gate parameters do not
allow for it), ValueError is raised.
Parameters
----------
amount : float or tuple
The amount to depolarize by. If a tuple, it must have length
equal to one less than the dimension of the spam vector. All but
the first element of the spam vector (often corresponding to the
identity element) are multiplied by `amount` (if a float) or
the corresponding `amount[i]` (if a tuple).
Returns
-------
None
"""
self.error_map.depolarize(amount)
self._update_rep()
class StabilizerSPAMVec(SPAMVec):
"""
A stabilizer state preparation represented internally using a compact
representation of its stabilizer group.
"""
@classmethod
def from_dense_purevec(cls, purevec):
"""
Create a new StabilizerSPAMVec from a pure-state vector.
Currently, purevec must be a single computational basis state (it
cannot be a superposition of multiple of them).
Parameters
----------
purevec : numpy.ndarray
A complex-valued state vector specifying a pure state in the
standard computational basis. This vector has length 2^n for
n qubits.
Returns
-------
StabilizerSPAMVec
"""
nqubits = int(round(_np.log2(len(purevec))))
v = (_np.array([1, 0], 'd'), _np.array([0, 1], 'd')) # (v0,v1)
for zvals in _itertools.product(*([(0, 1)] * nqubits)):
testvec = _functools.reduce(_np.kron, [v[i] for i in zvals])
if _np.allclose(testvec, purevec.flat):
return cls(nqubits, zvals)
raise ValueError(("Given `purevec` must be a z-basis product state - "
"cannot construct StabilizerSPAMVec"))
def __init__(self, nqubits, zvals=None, sframe=None):
"""
Initialize a StabilizerSPAMVec object.
Parameters
----------
nqubits : int
Number of qubits
zvals : iterable, optional
An iterable over anything that can be cast as True/False
to indicate the 0/1 value of each qubit in the Z basis.
If None, the all-zeros state is created.
sframe : StabilizerFrame, optional
A complete stabilizer frame to initialize this state from.
If this is not None, then `nqubits` and `zvals` must be None.
"""
if sframe is not None:
assert(nqubits is None and zvals is None), "`nqubits` and `zvals` must be None when `sframe` isn't!"
self.sframe = sframe
else:
self.sframe = _stabilizer.StabilizerFrame.from_zvals(nqubits, zvals)
rep = self.sframe.torep() # dim == 2**nqubits
SPAMVec.__init__(self, rep, "stabilizer", "prep")
def todense(self, scratch=None):
"""
Return this SPAM vector as a (dense) numpy array of shape
(2^(nqubits), 1). The memory in `scratch` may be used when
it is not None.
"""
statevec = self.sframe.to_statevec()
statevec.shape = (statevec.size, 1)
return statevec
#def torep(self, typ, outvec=None):
# """
# Return a "representation" object for this SPAMVec.
#
# Such objects are primarily used internally by pyGSTi to compute
# things like probabilities more efficiently.
#
# Returns
# -------
# SBStateRep
# """
# return self.sframe.torep()
def __str__(self):
s = "Stabilizer spam vector for %d qubits with rep:\n" % (self.sframe.nqubits)
s += str(self.sframe)
return s
class StabilizerEffectVec(SPAMVec): # FUTURE: merge this with ComputationalSPAMVec (w/evotype == "stabilizer")?
"""
A dummy SPAM vector that points to a set/product of 1-qubit POVM
outcomes from stabilizer-state measurements.
"""
@classmethod
def from_dense_purevec(cls, purevec):
"""
Create a new StabilizerEffectVec from a pure-state vector.
Currently, purevec must be a single computational basis state (it
cannot be a superposition of multiple of them).
Parameters
----------
purevec : numpy.ndarray
A complex-valued state vector specifying a pure state in the
standard computational basis. This vector has length 2^n for
n qubits.
Returns
-------
StabilizerEffectVec
"""
nqubits = int(round(_np.log2(len(purevec))))
v = (_np.array([1, 0], 'd'), _np.array([0, 1], 'd')) # (v0,v1)
for zvals in _itertools.product(*([(0, 1)] * nqubits)):
testvec = _functools.reduce(_np.kron, [v[i] for i in zvals])
if _np.allclose(testvec, purevec.flat):
return cls(zvals)
raise ValueError(("Given `purevec` must be a z-basis product state - "
"cannot construct StabilizerEffectVec"))
def __init__(self, outcomes):
"""
Initialize a StabilizerEffectVec object.
Parameters
----------
outcomes : iterable
A list or other iterable of integer 0 or 1 outcomes specifying
which POVM effect vector this object represents within the
full `stabilizerPOVM`
"""
self._outcomes = _np.ascontiguousarray(_np.array(outcomes, int), _np.int64)
#Note: dtype='i' => int in Cython, whereas dtype=int/np.int64 => long in Cython
rep = replib.SBEffectRep(self._outcomes) # dim == 2**nqubits == 2**len(outcomes)
SPAMVec.__init__(self, rep, "stabilizer", "effect")
#def torep(self, typ, outvec=None):
# # changes to_statevec/to_dmvec -> todense, and have
# # torep create an effect rep object...
# return replib.SBEffectRep(_np.ascontiguousarray(self._outcomes, _np.int64))
def todense(self):
"""
Return this SPAM vector as a dense state vector of shape
(2^(nqubits), 1)
Returns
-------
numpy array
"""
v = (_np.array([1, 0], 'd'), _np.array([0, 1], 'd')) # (v0,v1) - eigenstates of sigma_z
statevec = _functools.reduce(_np.kron, [v[i] for i in self.outcomes])
statevec.shape = (statevec.size, 1)
return statevec
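# For instance, outcomes (0, 1) represent the effect <01|: the product above is
# _np.kron([1, 0], [0, 1]) = [0, 1, 0, 0], reshaped to a column vector.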
@property
def outcomes(self):
""" The 0/1 outcomes identifying this effect within its StabilizerZPOVM """
return self._outcomes
def __str__(self):
nQubits = len(self.outcomes)
s = "Stabilizer effect vector for %d qubits with outcome %s" % (nQubits, str(self.outcomes))
return s
class ComputationalSPAMVec(SPAMVec):
"""
A static SPAM vector that is a tensor product of 1-qubit Z-eigenstates.
This is called a "computational basis state" in many contexts.
"""
@classmethod
def from_dense_vec(cls, vec, evotype):
"""
Create a new ComputationalSPAMVec from a dense vector.
Parameters
----------
vec : numpy.ndarray
A state vector specifying a computational basis state in the
standard basis. This vector has length 2^n or 4^n for
n qubits depending on `evotype`.
evotype : {"densitymx", "statevec", "stabilizer", "svterm", "cterm"}
The evolution type of the resulting SPAM vector. This value
must be consistent with `len(vec)`, in that `"statevec"` and
`"stabilizer"` expect 2^n whereas the rest expect 4^n.
Returns
-------
ComputationalSPAMVec
"""
if evotype in ('stabilizer', 'statevec'):
nqubits = int(round(_np.log2(len(vec))))
v0 = _np.array((1, 0), complex) # '0' qubit state as complex state vec
v1 = _np.array((0, 1), complex) # '1' qubit state as complex state vec
else:
nqubits = int(round(_np.log2(len(vec)) / 2))
v0 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, 1), 'd') # '0' qubit state as Pauli dmvec
v1 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, -1), 'd') # '1' qubit state as Pauli dmvec
v = (v0, v1)
for zvals in _itertools.product(*([(0, 1)] * nqubits)):
testvec = _functools.reduce(_np.kron, [v[i] for i in zvals])
if _np.allclose(testvec, vec.flat):
    return cls(zvals, evotype)
raise ValueError(("Given `vec` is not a z-basis product state - "
                  "cannot construct ComputationalSPAMVec"))
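# Sketch: with evotype "densitymx" the 1-qubit |0> state is the Pauli-product
# vector (1, 0, 0, 1)/sqrt(2) (the `v0` defined above), so for two qubits:
#
#   vec = _functools.reduce(_np.kron, [v0, v0])                 # dense |00> dm-vec
#   sv = ComputationalSPAMVec.from_dense_vec(vec, "densitymx")  # zvals == (0, 0)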
"""62-make-diffusionmaps-and-geometricharmonicsinterpolator-compatible-with-scikit-learn-api
Unit test for the Geometric Harmonics module.
"""
import unittest
import diffusion_maps as legacy_dmap
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as nptest
from scipy.sparse import csr_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.model_selection import ParameterGrid
from sklearn.utils.estimator_checks import check_estimator
from datafold.dynfold import DiffusionMaps
from datafold.dynfold.outofsample import (
GeometricHarmonicsInterpolator,
LaplacianPyramidsInterpolator,
MultiScaleGeometricHarmonicsInterpolator,
)
from datafold.dynfold.tests.helper import *
from datafold.pcfold.distance import IS_IMPORTED_RDIST
from datafold.pcfold.kernels import DmapKernelFixed, GaussianKernel
def plot_scatter(points: np.ndarray, values: np.ndarray, **kwargs) -> None:
title = kwargs.pop("title", None)
if title:
plt.title(title)
plt.scatter(
points[:, 0],
points[:, 1],
c=values,
marker="o",
rasterized=True,
s=2.5,
**kwargs,
)
cb = plt.colorbar()
cb.set_clim([np.min(values), np.max(values)])
cb.set_ticks(np.linspace(np.min(values), np.max(values), 5))
plt.xlim([-4, 4])
plt.ylim([-4, 4])
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.gca().set_aspect("equal")
def f(points: np.ndarray) -> np.ndarray:
"""Function to interpolate."""
# return np.ones(points.shape[0])
# return np.arange(points.shape[0])
return np.sin(np.linalg.norm(points, axis=-1))
class GeometricHarmonicsTest(unittest.TestCase):
# TODO: not tested yet:
# * error measurements (kfold, etc.), also with nD interpolation
def setUp(self):
# self.num_points = 1000
# self.points = downsample(np.load('data.npy'), self.num_points)
# self.values = np.ones(self.num_points)
# np.save('actual-data.npy', self.points)
# self.points = np.load('actual-data.npy')
# self.num_points = self.points.shape[0]
# self.values = np.ones(self.num_points)
self.points = make_points(23, -4, -4, 4, 4)
self.num_points = self.points.shape[0]
self.values = f(self.points)
def test_valid_sklearn_estimator(self):
# disable check on boston housing dataset
# see: https://scikit-learn.org/stable/developers/develop.html#estimator-tags
estimator = GeometricHarmonicsInterpolator(n_eigenpairs=1)
for estimator, check in check_estimator(estimator, generate_only=True):
check(estimator)
self.assertTrue(estimator._get_tags()["multioutput"])
self.assertTrue(estimator._get_tags()["requires_y"])
def test_geometric_harmonics_interpolator(self, plot=False):
eps = 1e-1
ghi = GeometricHarmonicsInterpolator(
GaussianKernel(epsilon=eps),
n_eigenpairs=self.num_points - 3,
dist_kwargs=dict(cut_off=1e1 * eps),
)
ghi = ghi.fit(self.points, self.values)
points = make_points(100, -4, -4, 4, 4)
values = ghi.predict(points)
residual = values - f(points)
self.assertLess(np.max(np.abs(residual)), 7.5e-2)
print(f"Original function={f(points)}")
print(f"Sampled points={self.values}")
print(f"Reconstructed function={values}")
print(f"Residual={residual}")
if plot:
plt.subplot(2, 2, 1)
plot_scatter(points, f(points), title="Original function")
plt.subplot(2, 2, 2)
plot_scatter(self.points, self.values, title="Sampled function")
plt.subplot(2, 2, 4)
plot_scatter(points, values, title="Reconstructed function")
plt.subplot(2, 2, 3)
plot_scatter(points, residual, title="Residual", cmap="RdBu_r")
plt.tight_layout()
plt.show()
def test_eigenfunctions(self, plot=False):
eps = 1e1
cut_off = 1e1 * eps
n_eigenpairs = 3
points = make_strip(0, 0, 1, 1e-1, 3000)
dm = DiffusionMaps(
GaussianKernel(epsilon=eps),
n_eigenpairs=n_eigenpairs,
dist_kwargs=dict(cut_off=1e100),
).fit(points)
setting = {
"kernel": GaussianKernel(eps),
"n_eigenpairs": n_eigenpairs,
"is_stochastic": False,
"dist_kwargs": dict(cut_off=cut_off),
}
ev1 = GeometricHarmonicsInterpolator(**setting).fit(
points, dm.eigenvectors_[:, 1]
)
ev2 = GeometricHarmonicsInterpolator(**setting).fit(
points, dm.eigenvectors_[:, 2]
)
rel_err1 = np.linalg.norm(
dm.eigenvectors_[:, 1] - ev1.predict(points), np.inf
) / np.linalg.norm(dm.eigenvectors_[:, 1], np.inf)
self.assertAlmostEqual(rel_err1, 0, places=1)
rel_err2 = np.linalg.norm(
dm.eigenvectors_[:, 2] - ev2.predict(points), np.inf
) / np.linalg.norm(dm.eigenvectors_[:, 2], np.inf)
self.assertAlmostEqual(rel_err2, 0, places=1)
if plot:
new_points = make_points(50, 0, 0, 1, 1e-1)
ev1i = ev1.predict(new_points)
ev2i = ev2.predict(new_points)
plt.subplot(1, 2, 1)
plt.scatter(new_points[:, 0], new_points[:, 1], c=ev1i, cmap="RdBu_r")
plt.subplot(1, 2, 2)
plt.scatter(new_points[:, 0], new_points[:, 1], c=ev2i, cmap="RdBu_r")
plt.show()
def test_dense_sparse(self):
data, _ = make_swiss_roll(n_samples=1000, noise=0, random_state=1)
dim_red_eps = 1.25
dense_setting = {
"kernel": GaussianKernel(dim_red_eps),
"n_eigenpairs": 6,
"is_stochastic": False,
"dist_kwargs": dict(cut_off=np.inf),
}
sparse_setting = {
"kernel": GaussianKernel(dim_red_eps),
"n_eigenpairs": 6,
"is_stochastic": False,
"dist_kwargs": dict(cut_off=1e100),
}
dmap_dense = DiffusionMaps(**dense_setting).fit(data)
values = dmap_dense.eigenvectors_[:, 1]
dmap_sparse = DiffusionMaps(**sparse_setting).fit(data)
# Check if any error occurs (functional test) and whether the provided DMAP is
# changed in any way.
gh_dense = GeometricHarmonicsInterpolator(**dense_setting).fit(data, values)
gh_sparse = GeometricHarmonicsInterpolator(**sparse_setting).fit(data, values)
self.assertEqual(gh_dense._dmap_kernel, dmap_dense._dmap_kernel)
self.assertEqual(gh_sparse._dmap_kernel, dmap_sparse._dmap_kernel)
# The parameters are set equal to the previously generated DMAP, therefore both
# have to be equal.
gh_dense_cmp = GeometricHarmonicsInterpolator(**dense_setting).fit(
data, values, store_kernel_matrix=True
)
gh_sparse_cmp = GeometricHarmonicsInterpolator(**sparse_setting).fit(
data, values, store_kernel_matrix=True
)
self.assertEqual(gh_dense_cmp._dmap_kernel, dmap_dense._dmap_kernel)
self.assertEqual(gh_sparse_cmp._dmap_kernel, dmap_sparse._dmap_kernel)
# Check that the correct format is set
self.assertTrue(isinstance(gh_dense_cmp.kernel_matrix_, np.ndarray))
self.assertTrue(isinstance(gh_sparse_cmp.kernel_matrix_, csr_matrix))
gh_dense_cmp.predict(data)
gh_sparse_cmp.predict(data)
# Check if sparse (without cutoff) and dense case give close results
nptest.assert_allclose(
gh_sparse_cmp.predict(data),
gh_dense_cmp.predict(data),
rtol=1e-14,
atol=1e-15,
)
nptest.assert_allclose(
gh_sparse_cmp.gradient(data),
gh_dense_cmp.gradient(data),
rtol=1e-14,
atol=1e-15,
)
def test_variable_number_of_points(self):
# Simply check if something fails
np.random.seed(1)
data = np.random.randn(100, 5)
values = np.random.randn(100)
parameter_grid = ParameterGrid(
{
"is_stochastic": [False],
"alpha": [0, 1],
"dist_kwargs": [
dict(cut_off=10),
dict(cut_off=100),
dict(cut_off=np.inf),
],
}
)
for setting in parameter_grid:
gh = GeometricHarmonicsInterpolator(
GaussianKernel(epsilon=0.01), n_eigenpairs=3, **setting
).fit(data, values)
# larger number of samples than original data
oos_data = np.random.randn(200, 5)
gh.predict(oos_data)
gh.gradient(oos_data)
oos_data = np.random.randn(100, 5) # same size as original data
gh.predict(oos_data)
gh.gradient(oos_data)
oos_data = np.random.randn(50, 5) # less than original data
gh.predict(oos_data)
gh.gradient(oos_data)
oos_data = np.random.randn(1, 5) # single sample
gh.predict(oos_data)
gh.gradient(oos_data)
@unittest.skip(reason="functionality and testing not finished")
def test_multiscale(self):
x_lims_train = (0, 10)
y_lims_train = (0, 10)
x_lims_test = (-2, 12)
y_lims_test = (-2, 12)
nr_sample_x_train = 30
nr_sample_y_train = 30
nr_sample_x_test = 200
nr_sample_y_test = 200
xx, yy = np.meshgrid(
np.linspace(*x_lims_train, nr_sample_x_train),
np.linspace(*y_lims_train, nr_sample_y_train),
)
zz = np.sin(yy) * np.sin(xx)
X_train = np.vstack(
[xx.reshape(np.product(xx.shape)), yy.reshape(np.product(yy.shape))]
).T
y_train = zz.reshape(np.product(zz.shape))
xx_oos, yy_oos = np.meshgrid(
np.linspace(*x_lims_test, nr_sample_x_test),
np.linspace(*y_lims_test, nr_sample_y_test),
)
zz_oos = np.sin(yy_oos) * np.sin(xx_oos)
X_oos = np.vstack(
[
xx_oos.reshape(np.product(xx_oos.shape)),
yy_oos.reshape(np.product(yy_oos.shape)),
]
).T
y_test = zz_oos.reshape(np.product(zz_oos.shape))
gh_single_interp = GeometricHarmonicsInterpolator(
epsilon=13.0,
n_eigenpairs=130,
alpha=0,
is_stochastic=False
# condition=1.0,
# admissible_error=1,
# initial_scale=5,
).fit(X_train, y_train)
gh_multi_interp = MultiScaleGeometricHarmonicsInterpolator(
initial_scale=50, n_eigenpairs=11, condition=50, admissible_error=0.4
).fit(X_train, y_train)
print("-----------------")
print("Residuum (train error):")
score_single_train = gh_single_interp.score(X_train, y_train)
score_multi_train = gh_multi_interp.score(X_train, y_train)
print(f"gh single = {score_single_train}")
print(f"gh multi = {score_multi_train}")
print("---")
print("Test error:")
score_single_test = gh_single_interp.score(X_oos, y_test)
score_multi_test = gh_multi_interp.score(X_oos, y_test)
print(f"gh single = {score_single_test}")
print(f"gh multi = {score_multi_test}")
print("----------------- \n")
#################################################################################
#################################################################################
#################################################################################
# TRAIN DATA
f, ax = plt.subplots(2, 3, sharex=True, sharey=True)
cur_row = ax[0]
cur_row[0].contourf(xx, yy, zz)
vlim = (np.min(zz), np.max(zz))
cur_row[0].plot(xx, yy, ".", c="k")
cur_row[0].set_title("Original")
# plt.figure("Single-scale geometric harmonics")
cur_row[1].plot(xx, yy, ".", c="k")
cur_row[1].contourf(
xx,
yy,
gh_single_interp.predict(X_train).reshape(
nr_sample_x_train, nr_sample_y_train
),
vmin=vlim[0],
vmax=vlim[1],
)
cur_row[1].set_title("Single geometric harmonics")
cur_row[2].plot(xx, yy, ".", c="k")
cur_row[2].contourf(
xx,
yy,
gh_multi_interp(X_train).reshape(nr_sample_x_train, nr_sample_y_train),
vmin=vlim[0],
vmax=vlim[1],
)
cur_row[2].set_title("Multi-scale geometric")
cur_row = ax[1]
abs_diff_single_train = np.abs(
zz
- gh_single_interp.predict(X_train).reshape(
nr_sample_x_train, nr_sample_y_train
)
)
abs_diff_multi_train = np.abs(
zz - gh_multi_interp(X_train).reshape(nr_sample_x_train, nr_sample_y_train)
)
vmin = np.min([abs_diff_single_train.min(), abs_diff_multi_train.min()])
vmax = np.max([abs_diff_single_train.max(), abs_diff_multi_train.max()])
cur_row[1].set_title("abs difference single scale")
cnf = cur_row[1].contourf(
xx, yy, abs_diff_single_train, cmap="Reds", vmin=vmin, vmax=vmax
)
# f.colorbar(cnf)
cur_row[1].plot(xx, yy, ".", c="k")
cur_row[2].set_title("abs difference multi scale")
cnf = cur_row[2].contourf(
xx, yy, abs_diff_multi_train, cmap="Reds", vmin=vmin, vmax=vmax
)
# f.colorbar(cnf)
cur_row[2].plot(xx, yy, ".", c="k")
#################################################################################
#################################################################################
#################################################################################
# OOS DATA
f, ax = plt.subplots(2, 3, sharex=True, sharey=True)
cur_row = ax[0]
cur_row[0].contourf(xx_oos, yy_oos, zz_oos)
vlim = (np.min(zz_oos), np.max(zz_oos))
cur_row[0].set_title("Original")
cur_row[1].set_title("Single geometric harmonics")
cur_row[1].contourf(
xx_oos,
yy_oos,
gh_single_interp.predict(X_oos).reshape(nr_sample_x_test, nr_sample_y_test),
vmin=vlim[0],
vmax=vlim[1],
)
cur_row[2].set_title("Multi scale geometric harmonics")
cur_row[2].contourf(
xx_oos,
yy_oos,
gh_multi_interp(X_oos).reshape(nr_sample_x_test, nr_sample_y_test),
vmin=vlim[0],
vmax=vlim[1],
)
cur_row = ax[1]
abs_diff_single_train = np.abs(
zz_oos
- gh_single_interp.predict(X_oos).reshape(
nr_sample_x_test, nr_sample_y_test
)
)
abs_diff_multi_train = np.abs(
zz_oos - gh_multi_interp(X_oos).reshape(nr_sample_x_test, nr_sample_y_test)
)
vmin = np.min([abs_diff_single_train.min(), abs_diff_multi_train.min()])
vmax = np.max([abs_diff_single_train.max(), abs_diff_multi_train.max()])
cur_row[1].set_title("abs difference single scale")
cnf = cur_row[1].contourf(
xx_oos, yy_oos, abs_diff_single_train, cmap="Reds", vmin=vmin, vmax=vmax
)
# f.colorbar(cnf)
cur_row[2].set_title("abs difference multi scale")
cnf = cur_row[2].contourf(
xx_oos, yy_oos, abs_diff_multi_train, cmap="Reds", vmin=vmin, vmax=vmax
)
# f.colorbar(cnf)
plt.show()
@unittest.skipIf(not IS_IMPORTED_RDIST, "rdist is not available")
def test_different_backends(self):
data, _ = make_swiss_roll(1000, random_state=1)
eps_interp = 100 # in this case much larger compared to 1.25 for dim. reduction
n_eigenpairs = 50
setting = {
"kernel": GaussianKernel(eps_interp),
"n_eigenpairs": n_eigenpairs,
"dist_kwargs": dict(cut_off=1e100, backend="scipy.kdtree"),
}
setting2 = {
"kernel": GaussianKernel(eps_interp),
"n_eigenpairs": n_eigenpairs,
"dist_kwargs": dict(cut_off=1e100, backend="scipy.kdtree"),
}
actual_phi_rdist = GeometricHarmonicsInterpolator(**setting).fit(
data, data[:, 0]
)
actual_phi_kdtree = GeometricHarmonicsInterpolator(**setting2).fit(
data, data[:, 0]
)
nptest.assert_allclose(
actual_phi_rdist.eigenvalues_,
actual_phi_kdtree.eigenvalues_,
atol=9e-14,
rtol=1e-14,
)
assert_equal_eigenvectors(
actual_phi_rdist.eigenvectors_, actual_phi_kdtree.eigenvectors_
)
result_rdist = actual_phi_rdist.predict(data)
result_kdtree = actual_phi_kdtree.predict(data)
# TODO: it is not clear why relative large tolerances are required... (also see
# further below).
nptest.assert_allclose(result_rdist, result_kdtree, atol=1e-12, rtol=1e-13)
# def test_gradient(self):
# xx, yy = np.meshgrid(np.linspace(0, 10, 20), np.linspace(0, 100, 20))
# zz = xx + np.sin(yy)
#
# data_points = np.vstack(
# [xx.reshape(np.product(xx.shape)), yy.reshape(np.product(yy.shape))]
# ).T
# target_values = zz.reshape(np.product(zz.shape))
#
# gh_interp = GeometricHarmonicsInterpolator(epsilon=100, n_eigenpairs=50)
# gh_interp = gh_interp.fit(data_points, target_values)
# score = gh_interp.score(data_points, target_values)
# print(f"score={score}")
#
# plt.figure()
# plt.contourf(xx, yy, zz)
# plt.figure()
# plt.contourf(xx, yy, gh_interp(data_points).reshape(20, 20))
#
# grad_x = xx
# grad_y = np.cos(yy)
# grad = np.vstack(
# [
# grad_x.reshape(np.product(grad_x.shape)),
# grad_y.reshape(np.product(grad_y.shape)),
# ]
# ).T
#
# print(np.linalg.norm(gh_interp.gradient(data_points) - grad))
def test_stochastic_kernel(self):
# Currently, only check if it runs through (with is_stochastic=True)
data = np.linspace(0, 2 * np.pi, 40)[:, np.newaxis]
values = np.sin(data)
gh_interp = GeometricHarmonicsInterpolator(
kernel=GaussianKernel(epsilon=0.5),
n_eigenpairs=30,
is_stochastic=True,
alpha=0,
symmetrize_kernel=False,
dist_kwargs=dict(cut_off=np.inf),
).fit(data, values)
score = gh_interp.score(data, values)
# NOTE: if is_stochastic=True and alpha =0, the GH is not able to reproduce the
# sin curve exactly.
# To identify changes in the implementation, this checks against a reference
# solution
print(score)
# Somehow, the remote computer produces a slightly different result...
reference = 0.04836717878208042
self.assertLessEqual(score, reference)
def test_renormalization_kernel(self, plot=False):
# Currently, only check if it runs through (with is_stochastic=True)
data = np.linspace(0, 2 * np.pi, 100)[:, np.newaxis]
values = np.sin(data)
from scipy.spatial.distance import pdist
gh_interp = GeometricHarmonicsInterpolator(
GaussianKernel(epsilon=2),
n_eigenpairs=30,
is_stochastic=True,
alpha=1,
symmetrize_kernel=True,
dist_kwargs=dict(
cut_off=np.inf,
),
).fit(data, values)
data_interp = np.linspace(0, 2 * np.pi, 100)[:, np.newaxis]
predicted_partial = gh_interp.predict(data[:10, :])
predicted_all = gh_interp.predict(data_interp)
score = gh_interp.score(data, values)
# NOTE: if is_stochastic=True and alpha=1 the GH is able to reproduce the
# sin curve more accurately.
# self.assertEqual(score, 0.0005576927798107333)
if plot:
# To identify changes in the implementation, this checks against a reference
# solution
print(score)
plt.plot(data, values, "-*")
plt.plot(data_interp, predicted_all, "-*")
plt.plot(data[:10, :], predicted_partial, "-*")
plt.show()
class GeometricHarmonicsLegacyTest(unittest.TestCase):
# We want to produce exactly the same results as the forked DMAP repository. These
# are test to make sure this is the case.
def setUp(self):
np.random.seed(1)
self.data, _ = make_swiss_roll(n_samples=1000, noise=0, random_state=1)
dim_red_eps = 1.25
dmap = DiffusionMaps(
GaussianKernel(epsilon=dim_red_eps),
n_eigenpairs=6,
dist_kwargs=dict(cut_off=1e100),
).fit(self.data)
self.phi_all = dmap.eigenvectors_[:, [1, 5]] # column wise like X_all
train_idx_stop = int(self.data.shape[0] * 2 / 3)
self.data_train = self.data[:train_idx_stop, :]
self.data_test = self.data[train_idx_stop:, :]
self.phi_train = self.phi_all[:train_idx_stop, :]
self.phi_test = self.phi_all[train_idx_stop:, :]
def test_method_example1(self):
# Example from method_examples/diffusion_maps/geometric_harmonics --
# out-of-samples case.
eps_interp = 100 # in this case much larger compared to 1.25 for dim. reduction
n_eigenpairs = 50
# Because the distances were changed (to consistently squared) the
# interpolation DMAP has to be computed again for the legacy case.
legacy_dmap_interp = legacy_dmap.SparseDiffusionMaps(
points=self.data_train, # use part of data
epsilon=eps_interp, # eps. for interpolation
num_eigenpairs=n_eigenpairs, # number of basis functions
cut_off=np.inf,
normalize_kernel=False,
)
setting = {
"kernel": GaussianKernel(epsilon=eps_interp),
"n_eigenpairs": n_eigenpairs,
"dist_kwargs": dict(cut_off=1e100),
}
actual_phi0 = GeometricHarmonicsInterpolator(**setting).fit(
self.data_train, self.phi_train[:, 0]
)
actual_phi1 = GeometricHarmonicsInterpolator(**setting).fit(
self.data_train, self.phi_train[:, 1]
)
actual_phi2d = GeometricHarmonicsInterpolator(**setting).fit(
self.data_train, self.phi_train
)
expected_phi0 = legacy_dmap.GeometricHarmonicsInterpolator(
points=self.data_train,
values=self.phi_train[:, 0],
# legacy code requires to set epsilon even in the case when
# "diffusion_maps" is handled
epsilon=-1,
diffusion_maps=legacy_dmap_interp,
)
expected_phi1 = legacy_dmap.GeometricHarmonicsInterpolator(
points=self.data_train,
values=self.phi_train[:, 1],
epsilon=-1,
diffusion_maps=legacy_dmap_interp,
)
# The reason why there is a relatively large atol is because we changed the way
# to compute an internal parameter in the GeometricHarmonicsInterpolator (from
# n**3 to n**2) -- this introduced some numerical differences.
nptest.assert_allclose(
actual_phi0.predict(self.data),
expected_phi0(self.data),
rtol=1e-10,
atol=1e-14,
)
nptest.assert_allclose(
actual_phi1.predict(self.data),
expected_phi1(self.data),
rtol=1e-10,
atol=1e-14,
)
# only phi_test because the computation is quite expensive
nptest.assert_allclose(
actual_phi0.gradient(self.data_test),
expected_phi0.gradient(self.data_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_phi1.gradient(self.data_test),
expected_phi1.gradient(self.data_test),
rtol=1e-13,
atol=1e-14,
)
# nD case
nptest.assert_allclose(
actual_phi2d.predict(self.data)[:, 0],
expected_phi0(self.data),
rtol=1e-11,
atol=1e-12,
)
nptest.assert_allclose(
actual_phi2d.predict(self.data)[:, 1],
expected_phi1(self.data),
rtol=1e-11,
atol=1e-12,
)
nptest.assert_allclose(
actual_phi2d.gradient(self.data_test, vcol=0),
expected_phi0.gradient(self.data_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_phi2d.gradient(self.data_test, vcol=1),
expected_phi1.gradient(self.data_test),
rtol=1e-13,
atol=1e-14,
)
def test_method_example2(self):
# Example from method_examples/diffusion_maps/geometric_harmonics -- inverse case.
np.random.seed(1)
eps_interp = 0.0005
# in this case much smaller compared to 1.25 for dim. reduction or 100 for the
# forward map
n_eigenpairs = 100
legacy_dmap_interp = legacy_dmap.SparseDiffusionMaps(
points=self.phi_train, # (!!) we use phi now
epsilon=eps_interp, # new eps. for interpolation
num_eigenpairs=n_eigenpairs,
cut_off=1e100,
normalize_kernel=False,
)
setting = {
"kernel": GaussianKernel(epsilon=eps_interp),
"n_eigenpairs": n_eigenpairs,
"is_stochastic": False,
"dist_kwargs": dict(cut_off=1e100),
}
actual_x0 = GeometricHarmonicsInterpolator(**setting).fit(
self.phi_train, self.data_train[:, 0]
)
actual_x1 = GeometricHarmonicsInterpolator(**setting).fit(
self.phi_train, self.data_train[:, 1]
)
actual_x2 = GeometricHarmonicsInterpolator(**setting).fit(
self.phi_train, self.data_train[:, 2]
)
# interpolate both values at once (new feature)
actual_2values = GeometricHarmonicsInterpolator(**setting).fit(
self.phi_train, self.data_train
)
# compare to legacy GH
expected_x0 = legacy_dmap.GeometricHarmonicsInterpolator(
points=self.phi_train,
values=self.data_train[:, 0],
epsilon=-1,
diffusion_maps=legacy_dmap_interp,
)
expected_x1 = legacy_dmap.GeometricHarmonicsInterpolator(
points=self.phi_train,
values=self.data_train[:, 1],
epsilon=-1,
diffusion_maps=legacy_dmap_interp,
)
expected_x2 = legacy_dmap.GeometricHarmonicsInterpolator(
points=self.phi_train,
values=self.data_train[:, 2],
epsilon=-1,
diffusion_maps=legacy_dmap_interp,
)
nptest.assert_allclose(
actual_x0.predict(self.phi_all),
expected_x0(self.phi_all),
rtol=1e-4,
atol=1e-6,
)
nptest.assert_allclose(
actual_x1.predict(self.phi_all),
expected_x1(self.phi_all),
rtol=1e-4,
atol=1e-6,
)
nptest.assert_allclose(
actual_x2.predict(self.phi_all),
expected_x2(self.phi_all),
rtol=1e-4,
atol=1e-6,
)
# only phi_test because the computation is quite expensive
nptest.assert_allclose(
actual_x0.gradient(self.phi_test),
expected_x0.gradient(self.phi_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_x1.gradient(self.phi_test),
expected_x1.gradient(self.phi_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_x2.gradient(self.phi_test),
expected_x2.gradient(self.phi_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_2values.predict(self.phi_all)[:, 0],
expected_x0(self.phi_all),
rtol=1e-5,
atol=1e-7,
)
nptest.assert_allclose(
actual_2values.predict(self.phi_all)[:, 1],
expected_x1(self.phi_all),
rtol=1e-5,
atol=1e-7,
)
nptest.assert_allclose(
actual_2values.predict(self.phi_all)[:, 2],
expected_x2(self.phi_all),
rtol=1e-5,
atol=1e-7,
)
nptest.assert_allclose(
actual_2values.gradient(self.phi_test, vcol=0),
expected_x0.gradient(self.phi_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_2values.gradient(self.phi_test, vcol=1),
expected_x1.gradient(self.phi_test),
rtol=1e-13,
atol=1e-14,
)
nptest.assert_allclose(
actual_2values.gradient(self.phi_test, vcol=2),
expected_x2.gradient(self.phi_test),
rtol=1e-13,
atol=1e-14,
)
def test_same_underlying_kernel(self):
# Actually not a legacy test, but uses the set up.
eps_interp = 0.0005
actual = DmapKernelFixed(
GaussianKernel(epsilon=eps_interp), is_stochastic=False, alpha=1
)
# GH must be trained before to set kernel
gh = GeometricHarmonicsInterpolator(
kernel=GaussianKernel(eps_interp), n_eigenpairs=1, is_stochastic=False
).fit(self.data_train, self.phi_train)
self.assertEqual(gh._dmap_kernel, actual)
class LaplacianPyramidsTest(unittest.TestCase):
def setUpSyntheticFernandez(self) -> None:
rng = np.random.default_rng(2)
self.X_fern = np.linspace(0, 10 * np.pi, 2000)[:, np.newaxis]
self.X_fern_test = np.sort(rng.uniform(0, 10 * np.pi, 500))[:, np.newaxis]
delta = 0.05
# EVALUATE TRAIN DATA
indicator_range2 = np.logical_and(
self.X_fern > 10 * np.pi / 3, self.X_fern <= 10 * np.pi
)
indicator_range3 = np.logical_and(
self.X_fern > 2 * 10 * np.pi / 3, self.X_fern <= 10 * np.pi
)
noise = rng.uniform(low=-delta, high=delta, size=self.X_fern.shape[0])
noise = noise[:, np.newaxis]
self.y_fern = (
np.sin(self.X_fern)
+ 0.5 * np.sin(3 * self.X_fern) * indicator_range2
+ 0.25 * np.sin(9 * self.X_fern) * indicator_range3
+ noise
)
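# The result is the multiscale test signal of the (Fernandez) Laplacian
# pyramids setup: sin(x) everywhere, plus 0.5*sin(3x) on the last two
# thirds of the domain and 0.25*sin(9x) on the last third, with uniform
# noise of amplitude delta.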
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 29 08:21:57 2017
@author: rebecca
"""
#Copyright 2018 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#imports
from __future__ import division
import numpy as np
import math as m
import matplotlib.pylab as plt
#import pdb
#commonly changed inputs
f_bedload = 0.25 #% of sand
totaltimestep = 5000
L = 120 #domain size (# of cells in x-direction); typically on order of 100
W = 240 #domain size (# of cells in y-direction); W = 2L for semicircle growth
plotinterval = 10
plotintervalstrat = 250 #record strata less frequently
runID = 1
itermax = 1 # # of iterations of water routing
Np_water = 2000 # number of water parcels; typically 2000
Np_sed = 2000 # number of sediment parcels
veg = 1 #veg on or off
#operations used in the model
class model_steps(object):
def direction_setup(self): #set up grid with # of neighbors and directions
Nnbr = np.zeros((L,W), dtype=np.int)
nbr = np.zeros((L,W,8))
#center nodes
Nnbr[1:L-1,1:W-1] = 8
nbr[1:L-1,1:W-1,:] = [(k+1) for k in range(8)]
# left side
Nnbr[0,1:W-1] = 5
for k in range(5):
nbr[0,1:W-1,k] = (6+(k+1))%8
nbr[0,1:W-1,1] = 8 #replace zeros with 8
# upper side
Nnbr[1:L-1,W-1] = 5
for k in range(5):
nbr[1:L-1,W-1,k] = (4+(k+1))%8
nbr[1:L-1,W-1,3] = 8 #replace zeros with 8
# lower side
Nnbr[1:L-1,0] = 5
for k in range(5):
nbr[1:L-1,0,k] = (k+1)%8
# lower-left corner
Nnbr[0,0] = 3
for k in range(3):
nbr[0,0,k] = (k+1)%8
# upper-left corner
Nnbr[0,W-1] = 3
for k in range(3):
nbr[0,W-1,k] = (6+(k+1))%8
nbr[0,W-1,1] = 8 #replace zeros with 8
self.Nnbr = Nnbr
self.nbr = nbr
def subsidence_setup(self): #set up subsidence
sigma = np.zeros((L,W))
sigma_max = 0.0*self.h0/1000
sigma_min = -0.0*self.h0/1000
for i in range(3,L):
for j in range(W):
sigma[i,j] = j/W*(sigma_max-sigma_min)+sigma_min
self.sigma = sigma
def setup(self): #define model parameters and set up grid
self.CTR = int((W-1)/2) # center cell
N0 = 5 # num of inlet cells
self.omega_flow_iter = 2*1/itermax
strataBtm = 1 #bottom layer elevation
self.dx = 50 #cell size, m
self.u0 = 1.0 #(m/s) characteristic flow velocity/inlet channel velocity
self.h0 = 5 # (m) characteristic flow depth/inlet channel depth; typically m to tens of m
self.S0 = 0.0003*f_bedload+0.0001*(1-f_bedload) #characteristic topographic slope; typically 10^-4-10^-5
V0 = self.h0*(self.dx**2) #(m^3) reference volume; volume to fill a channel cell to characteristic flow depth
dVs = 0.1*N0**2*V0 #sediment volume added in each timestep; used to determine time step size;
Qw0 = self.u0*self.h0*N0*self.dx
C0 = 0.1*1/100 #sediment concentration
Qs0 = Qw0*C0 #sediment total input discharge
self.dt = dVs/Qs0 #time step size
self.qw0 = Qw0/N0/self.dx #water unit input discharge
self.qs0 = self.qw0*C0 #sediment unit input discharge
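# Worked example with the defaults above (pure arithmetic, no new physics):
# V0 = 5*50**2 = 12,500 m^3; dVs = 0.1*5**2*V0 = 31,250 m^3;
# Qw0 = 1.0*5*5*50 = 1,250 m^3/s; Qs0 = Qw0*0.001 = 1.25 m^3/s;
# hence dt = dVs/Qs0 = 25,000 s, i.e. roughly 7 hours per timestep.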
self.Qp_water = Qw0/Np_water #volume of each water parcel
self.Vp_sed = dVs/Np_sed #volume of each sediment parcel
self.GRAVITY = 9.81
self.u_max = 2.0*self.u0
hB = 1.0*self.h0 #(m) basin depth
self.H_SL = 0 # sea level elevation (downstream boundary condition)
self.SLR = 0.0/1000/60/60/24/365#*self.h0/self.dt #put mm/yr as first number, converts to m/s
self.dry_depth = min(0.1,0.1*self.h0) #(m) critical depth to switch to 'dry' node
self.gamma = self.GRAVITY*self.S0*self.dx/self.u0/self.u0 #determines ratio of influence of inertia versus water surface gradient in calculating routing direction; controls how much water spreads laterally
#parameters for random walk probability calc
self.theta_water = 1.0 #depth dependence (power of h) in routing water parcels
self.theta_sand = 2.0*self.theta_water # depth dependence (power of h) in routing sand parcels; sand in lower part of water column, more likely to follow topographic lows
self.theta_mud = 1.0*self.theta_water # depth dependence (power of h) in routing mud parcels
#sediment deposition/erosion related parameters
self.beta = 3 #non-linear exponent of sediment flux to flow velocity
self._lambda = 1.0 #"sedimentation lag" - 1.0 means no lag
self.U_dep_mud = 0.3*self.u0 #if velocity is lower than this, mud is deposited
self.U_ero_sand = 1.05*self.u0 #if velocity higher than this, sand eroded
self.U_ero_mud = 1.5*self.u0 #if velocity higher than this, mud eroded
#topo diffusion relation parameters
self.alpha = np.zeros((L,W)) #0.05*(0.25*1*0.125) # topo-diffusion coefficient
self.N_crossdiff = int(round(dVs/V0))
if veg == 0:
self.alpha = self.alpha + 0.1
#veg related parameters
self.d_stem = 0.006 #stem diameter (m)
self.timestep_count = 0 #for tracking if interflood
self.f_veg = np.zeros((L,W)) #fractional cover/density of vegetation for each cell
self.K = 800 #carrying capacity (stems/cell)
self.a = 0.88*4/(m.pi*self.d_stem**2*self.K*((4/self.d_stem/self.K)-(0.7/self.d_stem/self.K))) #coefficient to make vegetation have proper influence
self.flood_dur = 3*24*60*60 #(s) 1 day, flood duration
if veg == 1:
self.f_veg_init = 0.05 #starting density is 5%
self.r_veg = 1/(365*24*60*60) #(s-1) growth rate
flood_freq = 100*24*60*60 #(s) 100 days, flood frequency/interflood period
self.dt_veg = flood_freq #time for veg growth
self.d_root = 0.2 #(m) root depth is 20 cm
self.eta_change = np.zeros((L,W))
self.eta_change_net = np.zeros((L,W))
#smoothing water surface
self.Nsmooth = 10 #iteration of surface smoothing per timestep
self.Csmooth = 0.9 #center-weighted surface smoothing
#under-relaxation between iterations
self.omega_sfc = 0.1 #under-relaxation coef for water surface
self.omega_flow = 0.9 #under-relaxation coef for water flow
#storage prep
self.eta = np.zeros((L,W)) # bed elevation
self.H = np.zeros((L,W)) #free surface elevation
self.h = np.zeros((L,W)) #depth of water
self.qx = np.zeros((L,W)) #unit discharge vector (x-comp)
self.qy = np.zeros((L,W)) #unit discharge vector (y-comp)
self.qw = np.zeros((L,W)) #unit discharge vector magnitude
self.ux = np.zeros((L,W)) #velocity vector (x-comp)
self.uy = np.zeros((L,W)) #velocity vector (y-comp)
self.uw = np.zeros((L,W)) #velocity magnitude
#value definition
SQ05 = m.sqrt(0.5)
SQ2 = m.sqrt(2)
self.dxn_ivec = [1,SQ05,0,-SQ05,-1,-SQ05,0,SQ05] #E --> clockwise
self.dxn_jvec = [0,SQ05,1,SQ05,0,-SQ05,-1,-SQ05] #E --> clockwise
self.dxn_iwalk = [1,1,0,-1,-1,-1,0,1] #E --> clockwise
self.dxn_jwalk = [0,1,1,1,0,-1,-1,-1] #E --> clockwise
self.dxn_dist = [1,SQ2,1,SQ2,1,SQ2,1,SQ2] #E --> clockwise
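# Indexing convention: direction value k uses entry k-1 of these lists, so
# k=1 steps by (1,0) ("E") over distance 1, k=2 by (1,1) over sqrt(2),
# k=3 by (0,1), ..., k=8 by (1,-1).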
self.wall_flag = np.zeros((L,W))
self.boundflag = np.zeros((L,W), dtype=np.int)
result = [(m.sqrt((i-3)**2+(j-self.CTR)**2))
for i in range(L)
for j in range(W)]
result = np.reshape(result,(L,W))
self.boundflag[result >= min(L-5,W/2-5)] = 1
#initial setup
self.L0 = 3
# type_ocean = 0
type_chn = 1
type_sed = 2
types = np.zeros((L,W))
types[0:self.L0,:] = type_sed
types[0:self.L0,int(self.CTR-round(N0/2)+1):int(self.CTR-round(N0/2)+N0+1)] = type_chn
self.wall_flag[types>1] = 1
#topo setup
self.h[types==0] = hB
self.h[types==1] = self.h0
self.H[0,:] = max(0,self.L0-1)*self.dx*self.S0
self.H[1,:] = max(0,self.L0-2)*self.dx*self.S0
self.H[2,:] = max(0,self.L0-3)*self.dx*self.S0
self.eta = self.H-self.h
#flow setup; flow doesn't satisfy mass conservation
self.qx[types==1] = self.qw0
self.qx[types==0] = self.qw0/5
self.qw = np.sqrt(self.qx**2+self.qy**2)
self.ux[self.h>0] = self.qx[self.h>0]/self.h[self.h>0]
self.uy[self.h>0] = self.qy[self.h>0]/self.h[self.h>0]
self.uw[self.h>0] = self.qw[self.h>0]/self.h[self.h>0]
self.direction_setup()
self.subsidence_setup()
self.wet_flag = np.zeros((L,W))
self.py_start = np.arange(self.CTR-round(N0/2)+1,self.CTR-round(N0/2)+N0+1, dtype=np.int) #vector of inlet cells y coord
self.px_start = 0
self.dxn_iwalk_inlet = self.dxn_iwalk[0] #x comp of inlet flow direction
self.dxn_jwalk_inlet = self.dxn_jwalk[0] #y comp of inlet flow direction
self.itmax = 2*(L+W)
#self.Hnew = np.zeros((L,W)) #temp water surface elevation before smoothing
self.qxn = np.zeros((L,W)) #placeholder for qx during calculations
self.qyn = np.zeros((L,W))
self.qwn = np.zeros((L,W))
self.sfc_visit = np.zeros((L,W)) # number of water parcels that have visited cell
self.sfc_sum = np.zeros((L,W)) #sum of water surface elevations from parcels that have visited cell
self.prepath_flag = np.zeros((L,W)) #flag for one parcel, to see if it should continue
self.iseq = np.zeros((self.itmax,1)) #tracks which cells were visited by parcel
self.jseq = np.zeros((self.itmax,1))
self.qs = np.zeros((L,W))
#prepare to record strata
self.z0 = self.H_SL-self.h0*strataBtm #bottom layer elevation
self.dz = 0.01*self.h0 #layer thickness
zmax = int(round((self.H_SL+self.SLR*totaltimestep*self.dt+self.S0*L/2*self.dx-self.z0)/self.dz)+10) #max layer number
strata0 = -1 #default value of none
self.strata = np.ones((L,W,zmax))*strata0
import numpy as np
from PIL import Image, ImageDraw
from .colors import *
def colorize_class_preds(class_maps, no_classes):
# class maps are level-batch-class-H-W
np_arrays = []
for lvl in class_maps:
lvl = map_color_values(lvl, no_classes, True)
np_arrays.append(lvl)
return np_arrays
def normalize_centerness(center_maps):
p_min = 1E6
p_max = -1E6
for lvl in center_maps:
p_min = np.min([p_min, np.min(lvl)])
p_max = np.max([p_max, np.max(lvl)])
normed_imgs = []
for lvl in center_maps:
lvl = (lvl - p_min) / (p_max - p_min) * 255
normed_imgs.append(lvl)
return normed_imgs
def image_pyramid(pred_maps, target_size):
"""Turns as series of images to a column of target_size images."""
resized_imgs = []
for lvl in pred_maps:
lvl = lvl.astype(np.uint8)
lvl_img = Image.fromarray(lvl)
lvl_img = lvl_img.resize(target_size[::-1])
lvl_img = np.array(lvl_img)
resized_imgs.append(lvl_img)
resized_imgs.append(np.full((10,) + lvl_img.shape[1:], 255))
img_cat = np.concatenate(resized_imgs)
return img_cat.astype(np.uint8)
def get_present_classes(classes_vis):
"""Finds all classes that exist in a given picture."""
unique_vals = []
for vis in classes_vis:
if isinstance(vis, np.ndarray):
unique_vals.extend(np.unique(vis))
else:
unique_vals.extend(np.unique(vis.cpu().numpy()))
ret = set(unique_vals)
try:
ret.remove(-1)
except KeyError:
pass
ret = list(ret)
ret.sort()
return ret
def stitch_big_image(images_list):
"""Stitches separate np.ndarray images into a single large array."""
if isinstance(images_list[0], np.ndarray):
# stitch vertically
# stack to 3 channels if necessary
max_len = 0
for ind, ele in enumerate(images_list):
if ele.shape[-1] == 1:
images_list[ind] = np.concatenate([ele, ele, ele], -1)
if ele.shape[1] > max_len:
max_len = ele.shape[1]
for ind, ele in enumerate(images_list):
if ele.shape[1] < max_len:
pad_ele = np.zeros(
(ele.shape[0], max_len-ele.shape[1], 3), np.uint8
)
images_list[ind] = np.concatenate([pad_ele, images_list[
ind]], 1)
return np.concatenate(images_list, 0)
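# Minimal usage sketch (illustrative only; the random maps stand in for real
# network outputs, and the module must be importable for the relative import
# of .colors above to work):
if __name__ == "__main__":
    maps = [np.random.rand(16 // 2 ** i, 16 // 2 ** i) * 255 for i in range(3)]
    normed = normalize_centerness(maps)       # jointly rescale to [0, 255]
    column = image_pyramid(normed, (64, 64))  # resize levels + stack vertically
    big = stitch_big_image([column, column])  # stitch columns into one array
    print(big.shape, big.dtype)               # (444, 64) uint8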
#!/usr/bin/env python
from datetime import datetime
import time
from kobot.msg import range_n_bearing_sensor, landmark_sensor, floor_sensor
import rospy
from geometry_msgs.msg import Twist, Vector3, PointStamped, PoseStamped
from std_msgs.msg import UInt8, Bool, String
from nav_msgs.msg import Odometry
import numpy as np
import math
import random
import tf
# for publishing dictionary as encoded string
import json
class LBADriver(object):
def __init__(self):
self.nu = 0
# max_d = 1.74
max_d = 1.1
th_len = 3
d_len = 2
self.x_goal = 0.0
self.y_goal = 0.0
self.action_dim = [th_len, d_len]
# a_th_arr = np.linspace(2*math.pi/10, 8*math.pi/10, th_len)
# a_th_arr = [math.pi/6, math.pi/3, math.pi/2, 2*math.pi/3,5*math.pi/6]
# a_th_arr = [2*math.pi/10, math.pi/2, 8*math.pi/10]
a_th_arr = [math.pi/3, math.pi/2, 2*math.pi/3]
# a_th_arr = [math.pi/2]
# a_d_arr = np.linspace(max_d, max_d, d_len)
a_d_arr = [0.9,1.4]
actions = []
for a_th in a_th_arr:
action_d = []
for a_d in a_d_arr:
action_d.append([a_th, a_d])
actions.append(action_d)
self.actions = actions
self.landmark_id_list = [40, 41, 42, 43, 44, 45]
# convert landmark ids to str
self.landmark_id_list = [str(i) for i in self.landmark_id_list]
self.Q_table = {}
self.check_table = {}
# initialize dict. with landmark ids are keys
# and a np array is reward values
for landmark_id in self.landmark_id_list:
self.Q_table[landmark_id] = np.zeros((th_len, d_len))
self.check_table[landmark_id] = np.zeros((th_len, d_len))
self.get_params()
# default vals.
self.active_landmark = None
self.action_id = [None, None]
self.prev_landmark = None
self.obs_detected = False
self.robot_detected = False
self.going_cue = False
self.sub_lock = False
self.range_prev = [0]*8
self.is_robot_prev = [0]*8
self.robot_pose = [0, 0, 0]
self.I_c = 0
self.I_avg_prev = 0
# message initalizers w/ def. vals.
self.obj_msg = Bool()
self.obj_msg = False
self.intensity_msg = UInt8()
self.intensity_msg = 0
self.landmark_msg = UInt8()
self.landmark_msg = 0
self.landmark_dict = {}
# first define publishers to not get any err.
self.nav_vel_pub = rospy.Publisher(
"nav_vel",
Twist, queue_size=1)
self.Q_table_pub = rospy.Publisher(
"Q_table",
String, queue_size=1)
self.check_table_pub = rospy.Publisher(
"check_table",
String, queue_size=1)
# publishers for neopixel visualization
self.intensity_vis_pub = rospy.Publisher(
"lba/intensity",
UInt8, queue_size=1)
self.landmark_vis_pub = rospy.Publisher(
"lba/landmark",
UInt8, queue_size=1)
# publisher for encoded landmark dict.
self.dict_pub = rospy.Publisher(
"landmark",
String, queue_size=1)
# publisher for closed loop position control
self.pose_goal_pub = rospy.Publisher(
"move_base_simple/goal",
PoseStamped, queue_size=1)
# publisher for switching between vel. and pos. control
self.move_lock_pub = rospy.Publisher(
"move_lock",
Bool, queue_size=1)
freq = 20
if rospy.has_param('odom_freq'):
freq = rospy.get_param('odom_freq')
self.rate_turn_theta = rospy.Rate(freq)
# transformer objects
self.listener = tf.TransformListener()
self.broadcaster = tf.TransformBroadcaster()
rospy.Subscriber("goal_reached",
Bool,
self.goal_reached_callback,
queue_size=1)
rospy.Subscriber("sensors/range_n_bearing",
range_n_bearing_sensor,
self.rb_callback,
queue_size=1)
rospy.Subscriber("sensors/landmark_sensor",
UInt8,
self.landmark_callback,
queue_size=1)
rospy.Subscriber("sensors/floor_sensor",
floor_sensor,
self.intensity_callback,
queue_size=1)
rospy.Subscriber("wheel_odom",
Odometry,
self.odom_callback,
queue_size=1)
def goal_reached_callback(self, data):
"""
Callback called when position controller
informs that goal is reached
"""
if data.data:
self.going_cue = False
rospy.loginfo("Goal reached")
if self.I_c > self.I_thresh:
self.publish_neopixel('green')
self.update_Q_table(self.I_c)
else:
self.publish_neopixel('white')
self.update_Q_table(-1)
else:
self.going_cue = False
rospy.loginfo("Goal not reached")
def select_action(self):
"""
Decide whether to explore (take a random action) or
exploit the Q table, as governed by the epsilon value
"""
if self.going_cue:
return
epsilon = random.uniform(0.0, 1.0)
if epsilon < self.epsilon:
rospy.loginfo("Random action")
# exploration
self.publish_neopixel('red')
th_indx = random.randint(0, self.action_dim[0]-1)
d_indx = random.randint(0, self.action_dim[1]-1)
self.action_id = [th_indx, d_indx]
else:
rospy.loginfo("Best action")
# explotation
self.publish_neopixel('green')
actions = self.Q_table[self.active_landmark]
# TODO: if more than one action shares the best value,
# choose randomly among them
i, j = np.unravel_index(np.argmax(actions, axis=None), actions.shape)
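# Epsilon-greedy in a nutshell: with probability self.epsilon a uniformly
# random (theta, d) index pair is drawn; otherwise the indices of the
# maximum Q-value for the active landmark are taken. For a 3x2 table whose
# maximum sits at row 1, column 0, np.unravel_index(np.argmax(...), (3, 2))
# returns (1, 0).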
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2018-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module provides Arc ROI item for the :class:`~silx.gui.plot.PlotWidget`.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "28/06/2018"
import numpy
from ... import utils
from .. import items
from ...colors import rgba
from ....utils.proxy import docstring
from ._roi_base import HandleBasedROI
from ._roi_base import InteractionModeMixIn
from ._roi_base import RoiInteractionMode
class _ArcGeometry:
"""
Non-mutable object to store the geometry of the arc ROI.
The aim is to switch between consistent states without dealing with
intermediate values.
"""
def __init__(self, center, startPoint, endPoint, radius,
weight, startAngle, endAngle, closed=False):
"""Constructor for a consistent arc geometry.
There is also specific class method to create different kind of arc
geometry.
"""
self.center = center
self.startPoint = startPoint
self.endPoint = endPoint
self.radius = radius
self.weight = weight
self.startAngle = startAngle
self.endAngle = endAngle
self._closed = closed
@classmethod
def createEmpty(cls):
"""Create an arc geometry from an empty shape
"""
zero = numpy.array([0, 0])
return cls(zero, zero.copy(), zero.copy(), 0, 0, 0, 0)
@classmethod
def createRect(cls, startPoint, endPoint, weight):
"""Create an arc geometry from a definition of a rectangle
"""
return cls(None, startPoint, endPoint, None, weight, None, None, False)
@classmethod
def createCircle(cls, center, startPoint, endPoint, radius,
weight, startAngle, endAngle):
"""Create an arc geometry from a definition of a circle
"""
return cls(center, startPoint, endPoint, radius,
weight, startAngle, endAngle, True)
def withWeight(self, weight):
"""Return a new geometry based on this object, with a specific weight
"""
return _ArcGeometry(self.center, self.startPoint, self.endPoint,
self.radius, weight,
self.startAngle, self.endAngle, self._closed)
def withRadius(self, radius):
"""Return a new geometry based on this object, with a specific radius.
The weight and the center are conserved.
"""
startPoint = self.center + (self.startPoint - self.center) / self.radius * radius
endPoint = self.center + (self.endPoint - self.center) / self.radius * radius
return _ArcGeometry(self.center, startPoint, endPoint,
radius, self.weight,
self.startAngle, self.endAngle, self._closed)
def withStartAngle(self, startAngle):
"""Return a new geometry based on this object, with a specific start angle
"""
vector = numpy.array([numpy.cos(startAngle), numpy.sin(startAngle)])
startPoint = self.center + vector * self.radius
# Never add more than 180 to maintain coherency
deltaAngle = startAngle - self.startAngle
if deltaAngle > numpy.pi:
deltaAngle -= numpy.pi * 2
elif deltaAngle < -numpy.pi:
deltaAngle += numpy.pi * 2
startAngle = self.startAngle + deltaAngle
return _ArcGeometry(
self.center,
startPoint,
self.endPoint,
self.radius,
self.weight,
startAngle,
self.endAngle,
self._closed,
)
def withEndAngle(self, endAngle):
"""Return a new geometry based on this object, with a specific end angle
"""
vector = numpy.array([numpy.cos(endAngle), numpy.sin(endAngle)])
endPoint = self.center + vector * self.radius
# Never add more than 180 to maintain coherency
deltaAngle = endAngle - self.endAngle
if deltaAngle > numpy.pi:
deltaAngle -= numpy.pi * 2
elif deltaAngle < -numpy.pi:
deltaAngle += numpy.pi * 2
endAngle = self.endAngle + deltaAngle
return _ArcGeometry(
self.center,
self.startPoint,
endPoint,
self.radius,
self.weight,
self.startAngle,
endAngle,
self._closed,
)
def translated(self, dx, dy):
"""Return the translated geometry by dx, dy"""
delta = numpy.array([dx, dy])
center = None if self.center is None else self.center + delta
startPoint = None if self.startPoint is None else self.startPoint + delta
endPoint = None if self.endPoint is None else self.endPoint + delta
return _ArcGeometry(center, startPoint, endPoint,
self.radius, self.weight,
self.startAngle, self.endAngle, self._closed)
def getKind(self):
"""Returns the kind of shape defined"""
if self.center is None:
return "rect"
elif numpy.isnan(self.startAngle):
return "point"
elif self.isClosed():
if self.weight <= 0 or self.weight * 0.5 >= self.radius:
return "circle"
else:
return "donut"
else:
if self.weight * 0.5 < self.radius:
return "arc"
else:
return "camembert"
def isClosed(self):
"""Returns True if the geometry is a circle like"""
if self._closed is not None:
return self._closed
delta = numpy.abs(self.endAngle - self.startAngle)
self._closed = numpy.isclose(delta, numpy.pi * 2)
return self._closed
def __str__(self):
return str((self.center,
self.startPoint,
self.endPoint,
self.radius,
self.weight,
self.startAngle,
self.endAngle,
self._closed))
class ArcROI(HandleBasedROI, items.LineMixIn, InteractionModeMixIn):
"""A ROI identifying an arc of a circle with a width.
This ROI provides
- 3 handle to control the curvature
- 1 handle to control the weight
- 1 anchor to translate the shape.
"""
ICON = 'add-shape-arc'
NAME = 'arc ROI'
SHORT_NAME = "arc"
"""Metadata for this kind of ROI"""
_plotShape = "line"
"""Plot shape which is used for the first interaction"""
ThreePointMode = RoiInteractionMode("3 points", "Provides 3 points to define the main radius circle")
PolarMode = RoiInteractionMode("Polar", "Provides anchors to edit the ROI in polar coords")
# FIXME: MoveMode was designed because there are too many anchors
# FIXME: It would be good to replace it with a drag-and-drop on the shape
MoveMode = RoiInteractionMode("Translation", "Provides anchors to only move the ROI")
def __init__(self, parent=None):
HandleBasedROI.__init__(self, parent=parent)
items.LineMixIn.__init__(self)
InteractionModeMixIn.__init__(self)
self._geometry = _ArcGeometry.createEmpty()
self._handleLabel = self.addLabelHandle()
self._handleStart = self.addHandle()
self._handleMid = self.addHandle()
self._handleEnd = self.addHandle()
self._handleWeight = self.addHandle()
self._handleWeight._setConstraint(self._arcCurvatureMarkerConstraint)
self._handleMove = self.addTranslateHandle()
shape = items.Shape("polygon")
shape.setPoints([[0, 0], [0, 0]])
shape.setColor(rgba(self.getColor()))
shape.setFill(False)
shape.setOverlay(True)
shape.setLineStyle(self.getLineStyle())
shape.setLineWidth(self.getLineWidth())
self.__shape = shape
self.addItem(shape)
self._initInteractionMode(self.ThreePointMode)
self._interactiveModeUpdated(self.ThreePointMode)
def availableInteractionModes(self):
"""Returns the list of available interaction modes
:rtype: List[RoiInteractionMode]
"""
return [self.ThreePointMode, self.PolarMode, self.MoveMode]
def _interactiveModeUpdated(self, modeId):
"""Set the interaction mode.
:param RoiInteractionMode modeId:
"""
if modeId is self.ThreePointMode:
self._handleStart.setSymbol("s")
self._handleMid.setSymbol("s")
self._handleEnd.setSymbol("s")
self._handleWeight.setSymbol("d")
self._handleMove.setSymbol("+")
elif modeId is self.PolarMode:
self._handleStart.setSymbol("o")
self._handleMid.setSymbol("o")
self._handleEnd.setSymbol("o")
self._handleWeight.setSymbol("d")
self._handleMove.setSymbol("+")
elif modeId is self.MoveMode:
self._handleStart.setSymbol("")
self._handleMid.setSymbol("+")
self._handleEnd.setSymbol("")
self._handleWeight.setSymbol("")
self._handleMove.setSymbol("+")
else:
assert False
if self._geometry.isClosed():
if modeId != self.MoveMode:
self._handleStart.setSymbol("x")
self._handleEnd.setSymbol("x")
self._updateHandles()
def _updated(self, event=None, checkVisibility=True):
if event == items.ItemChangedType.VISIBLE:
self._updateItemProperty(event, self, self.__shape)
super(ArcROI, self)._updated(event, checkVisibility)
def _updatedStyle(self, event, style):
super(ArcROI, self)._updatedStyle(event, style)
self.__shape.setColor(style.getColor())
self.__shape.setLineStyle(style.getLineStyle())
self.__shape.setLineWidth(style.getLineWidth())
def setFirstShapePoints(self, points):
""""Initialize the ROI using the points from the first interaction.
This interaction is constrained by the plot API and only supports few
shapes.
"""
# The first shape is a line
point0 = points[0]
point1 = points[1]
# Compute a non collinear point for the curvature
center = (point1 + point0) * 0.5
normal = point1 - center
normal = numpy.array((normal[1], -normal[0]))
defaultCurvature = numpy.pi / 5.0
weightCoef = 0.20
mid = center - normal * defaultCurvature
distance = numpy.linalg.norm(point0 - point1)
weight = distance * weightCoef
geometry = self._createGeometryFromControlPoints(point0, mid, point1, weight)
self._geometry = geometry
self._updateHandles()
def _updateText(self, text):
self._handleLabel.setText(text)
def _updateMidHandle(self):
"""Keep the same geometry, but update the location of the control
points.
So calling this function do not trigger sigRegionChanged.
"""
geometry = self._geometry
if geometry.isClosed():
start = numpy.array(self._handleStart.getPosition())
midPos = geometry.center + geometry.center - start
else:
if geometry.center is None:
midPos = geometry.startPoint * 0.5 + geometry.endPoint * 0.5
else:
midAngle = geometry.startAngle * 0.5 + geometry.endAngle * 0.5
vector = numpy.array([numpy.cos(midAngle), numpy.sin(midAngle)])
midPos = geometry.center + geometry.radius * vector
with utils.blockSignals(self._handleMid):
self._handleMid.setPosition(*midPos)
def _updateWeightHandle(self):
geometry = self._geometry
if geometry.center is None:
# rectangle
center = (geometry.startPoint + geometry.endPoint) * 0.5
normal = geometry.endPoint - geometry.startPoint
normal = numpy.array((normal[1], -normal[0]))
distance = numpy.linalg.norm(normal)
if distance != 0:
normal = normal / distance
weightPos = center + normal * geometry.weight * 0.5
else:
if geometry.isClosed():
midAngle = geometry.startAngle + numpy.pi * 0.5
elif geometry.center is not None:
midAngle = (geometry.startAngle + geometry.endAngle) * 0.5
vector = numpy.array([numpy.cos(midAngle), numpy.sin(midAngle)])
weightPos = geometry.center + (geometry.radius + geometry.weight * 0.5) * vector
with utils.blockSignals(self._handleWeight):
self._handleWeight.setPosition(*weightPos)
def _getWeightFromHandle(self, weightPos):
geometry = self._geometry
if geometry.center is None:
# rectangle
center = (geometry.startPoint + geometry.endPoint) * 0.5
return numpy.linalg.norm(center - weightPos) * 2
else:
distance = numpy.linalg.norm(geometry.center - weightPos)
return abs(distance - geometry.radius) * 2
def _updateHandles(self):
geometry = self._geometry
with utils.blockSignals(self._handleStart):
self._handleStart.setPosition(*geometry.startPoint)
with utils.blockSignals(self._handleEnd):
self._handleEnd.setPosition(*geometry.endPoint)
self._updateMidHandle()
self._updateWeightHandle()
self._updateShape()
def _updateCurvature(self, start, mid, end, updateCurveHandles, checkClosed=False, updateStart=False):
"""Update the curvature using 3 control points in the curve
:param bool updateCurveHandles: If False curve handles are already at
the right location
"""
if checkClosed:
closed = self._isCloseInPixel(start, end)
else:
closed = self._geometry.isClosed()
if closed:
if updateStart:
start = end
else:
end = start
if updateCurveHandles:
with utils.blockSignals(self._handleStart):
self._handleStart.setPosition(*start)
with utils.blockSignals(self._handleMid):
self._handleMid.setPosition(*mid)
with utils.blockSignals(self._handleEnd):
self._handleEnd.setPosition(*end)
weight = self._geometry.weight
geometry = self._createGeometryFromControlPoints(start, mid, end, weight, closed=closed)
self._geometry = geometry
self._updateWeightHandle()
self._updateShape()
def _updateCloseInAngle(self, geometry, updateStart):
azim = numpy.abs(geometry.endAngle - geometry.startAngle)
if numpy.pi < azim < 3 * numpy.pi:
closed = self._isCloseInPixel(geometry.startPoint, geometry.endPoint)
geometry._closed = closed
if closed:
sign = 1 if geometry.startAngle < geometry.endAngle else -1
if updateStart:
geometry.startPoint = geometry.endPoint
geometry.startAngle = geometry.endAngle - sign * 2*numpy.pi
else:
geometry.endPoint = geometry.startPoint
geometry.endAngle = geometry.startAngle + sign * 2*numpy.pi
def handleDragUpdated(self, handle, origin, previous, current):
modeId = self.getInteractionMode()
if handle is self._handleStart:
if modeId is self.ThreePointMode:
mid = numpy.array(self._handleMid.getPosition())
end = numpy.array(self._handleEnd.getPosition())
self._updateCurvature(
current, mid, end, checkClosed=True, updateStart=True,
updateCurveHandles=False
)
elif modeId is self.PolarMode:
v = current - self._geometry.center
startAngle = numpy.angle(complex(v[0], v[1]))
geometry = self._geometry.withStartAngle(startAngle)
self._updateCloseInAngle(geometry, updateStart=True)
self._geometry = geometry
self._updateHandles()
elif handle is self._handleMid:
if modeId is self.ThreePointMode:
if self._geometry.isClosed():
radius = numpy.linalg.norm(self._geometry.center - current)
self._geometry = self._geometry.withRadius(radius)
self._updateHandles()
else:
start = numpy.array(self._handleStart.getPosition())
end = numpy.array(self._handleEnd.getPosition())
self._updateCurvature(start, current, end, updateCurveHandles=False)
elif modeId is self.PolarMode:
radius = numpy.linalg.norm(self._geometry.center - current)
self._geometry = self._geometry.withRadius(radius)
self._updateHandles()
elif modeId is self.MoveMode:
delta = current - previous
self.translate(*delta)
elif handle is self._handleEnd:
if modeId is self.ThreePointMode:
start = numpy.array(self._handleStart.getPosition())
mid = numpy.array(self._handleMid.getPosition())
self._updateCurvature(
start, mid, current, checkClosed=True, updateStart=False,
updateCurveHandles=False
)
elif modeId is self.PolarMode:
v = current - self._geometry.center
endAngle = numpy.angle(complex(v[0], v[1]))
geometry = self._geometry.withEndAngle(endAngle)
self._updateCloseInAngle(geometry, updateStart=False)
self._geometry = geometry
self._updateHandles()
elif handle is self._handleWeight:
weight = self._getWeightFromHandle(current)
self._geometry = self._geometry.withWeight(weight)
self._updateShape()
elif handle is self._handleMove:
delta = current - previous
self.translate(*delta)
def _isCloseInPixel(self, point1, point2):
manager = self.parent()
if manager is None:
return False
plot = manager.parent()
if plot is None:
return False
point1 = plot.dataToPixel(*point1)
if point1 is None:
return False
point2 = plot.dataToPixel(*point2)
if point2 is None:
return False
return abs(point1[0] - point2[0]) + abs(point1[1] - point2[1]) < 15
def _normalizeGeometry(self):
"""Keep the same phisical geometry, but with normalized parameters.
"""
geometry = self._geometry
if geometry.weight * 0.5 >= geometry.radius:
radius = (geometry.weight * 0.5 + geometry.radius) * 0.5
geometry = geometry.withRadius(radius)
geometry = geometry.withWeight(radius * 2)
self._geometry = geometry
return True
return False
def handleDragFinished(self, handle, origin, current):
modeId = self.getInteractionMode()
if handle in [self._handleStart, self._handleMid, self._handleEnd]:
if modeId is self.ThreePointMode:
self._normalizeGeometry()
self._updateHandles()
if self._geometry.isClosed():
if modeId is self.MoveMode:
self._handleStart.setSymbol("")
self._handleEnd.setSymbol("")
else:
self._handleStart.setSymbol("x")
self._handleEnd.setSymbol("x")
else:
if modeId is self.ThreePointMode:
self._handleStart.setSymbol("s")
self._handleEnd.setSymbol("s")
elif modeId is self.PolarMode:
self._handleStart.setSymbol("o")
self._handleEnd.setSymbol("o")
if modeId is self.MoveMode:
self._handleStart.setSymbol("")
self._handleEnd.setSymbol("")
def _createGeometryFromControlPoints(self, start, mid, end, weight, closed=None):
"""Returns the geometry of the object"""
if closed or (closed is None and numpy.allclose(start, end)):
# Special arc: It's a closed circle
center = (start + mid) * 0.5
radius = numpy.linalg.norm(start - center)
v = start - center
startAngle = numpy.angle(complex(v[0], v[1]))
endAngle = startAngle + numpy.pi * 2.0
return _ArcGeometry.createCircle(
center, start, end, radius, weight, startAngle, endAngle
)
elif numpy.linalg.norm(numpy.cross(mid - start, end - start)) < 1e-5:
# Degenerated arc, it's a rectangle
return _ArcGeometry.createRect(start, end, weight)
else:
center, radius = self._circleEquation(start, mid, end)
v = start - center
startAngle = numpy.angle(complex(v[0], v[1]))
v = mid - center
midAngle = numpy.angle(complex(v[0], v[1]))
v = end - center
endAngle = numpy.angle(complex(v[0], v[1]))
# Is it clockwise or anticlockwise
relativeMid = (endAngle - midAngle + 2 * numpy.pi) % (2 * numpy.pi)
relativeEnd = (endAngle - startAngle + 2 * numpy.pi) % (2 * numpy.pi)
if relativeMid < relativeEnd:
if endAngle < startAngle:
endAngle += 2 * numpy.pi
else:
if endAngle > startAngle:
endAngle -= 2 * numpy.pi
return _ArcGeometry(center, start, end,
radius, weight, startAngle, endAngle)
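# Example: startAngle=0, midAngle=pi/2, endAngle=pi gives relativeMid=pi/2
# and relativeEnd=pi; relativeMid < relativeEnd means the mid handle lies
# on the path from start to end, so endAngle stays greater than startAngle.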
def _createShapeFromGeometry(self, geometry):
kind = geometry.getKind()
if kind == "rect":
# It is not an arc
# but we can display it as an intermediate shape
normal = geometry.endPoint - geometry.startPoint
normal = numpy.array((normal[1], -normal[0]))
distance = numpy.linalg.norm(normal)
if distance != 0:
normal /= distance
points = numpy.array([
geometry.startPoint + normal * geometry.weight * 0.5,
geometry.endPoint + normal * geometry.weight * 0.5,
geometry.endPoint - normal * geometry.weight * 0.5,
geometry.startPoint - normal * geometry.weight * 0.5])
elif kind == "point":
# It is not an arc
# but we can display it as an intermediate shape
# NOTE: At least 2 points are expected
points = numpy.array([geometry.startPoint, geometry.startPoint])
elif kind == "circle":
outerRadius = geometry.radius + geometry.weight * 0.5
angles = numpy.linspace(0, 2 * numpy.pi, num=50)
# It's a circle
points = []
angles = numpy.append(angles, angles[-1])
for angle in angles:
direction = numpy.array([numpy.cos(angle), numpy.sin(angle)])
points.append(geometry.center + direction * outerRadius)
points = numpy.array(points)
elif kind == "donut":
innerRadius = geometry.radius - geometry.weight * 0.5
outerRadius = geometry.radius + geometry.weight * 0.5
angles = numpy.linspace(0, 2 * numpy.pi, num=50)
# It's a donut
points = []
# NOTE: NaN value allow to create 2 separated circle shapes
# using a single plot item. It's a kind of cheat
points.append(numpy.array([float("nan"), float("nan")]))
for angle in angles:
direction = numpy.array([numpy.cos(angle), numpy.sin(angle)])
points.insert(0, geometry.center + direction * innerRadius)
points.append(geometry.center + direction * outerRadius)
points.append(numpy.array([float("nan"), float("nan")]))
points = numpy.array(points)
else:
innerRadius = geometry.radius - geometry.weight * 0.5
outerRadius = geometry.radius + geometry.weight * 0.5
delta = 0.1 if geometry.endAngle >= geometry.startAngle else -0.1
if geometry.startAngle == geometry.endAngle:
# Degenerated, it's a line (single radius)
angle = geometry.startAngle
direction = numpy.array([numpy.cos(angle), numpy.sin(angle)])
points = []
points.append(geometry.center + direction * innerRadius)
points.append(geometry.center + direction * outerRadius)
return numpy.array(points)
angles = numpy.arange(geometry.startAngle, geometry.endAngle, delta)
if angles[-1] != geometry.endAngle:
angles = numpy.append(angles, geometry.endAngle)
if kind == "camembert":
# It's a part of camembert
points = []
points.append(geometry.center)
points.append(geometry.startPoint)
delta = 0.1 if geometry.endAngle >= geometry.startAngle else -0.1
for angle in angles:
direction = numpy.array([numpy.cos(angle), numpy.sin(angle)])
points.append(geometry.center + direction * outerRadius)
points.append(geometry.endPoint)
points.append(geometry.center)
elif kind == "arc":
# It's a part of donut
points = []
points.append(geometry.startPoint)
for angle in angles:
direction = numpy.array([numpy.cos(angle), numpy.sin(angle)])
points.insert(0, geometry.center + direction * innerRadius)
points.append(geometry.center + direction * outerRadius)
points.insert(0, geometry.endPoint)
points.append(geometry.endPoint)
else:
assert False
points = numpy.array(points)
return points
def _updateShape(self):
geometry = self._geometry
points = self._createShapeFromGeometry(geometry)
self.__shape.setPoints(points)
index = numpy.nanargmin(points[:, 1])
pos = points[index]
with utils.blockSignals(self._handleLabel):
self._handleLabel.setPosition(pos[0], pos[1])
if geometry.center is None:
movePos = geometry.startPoint * 0.34 + geometry.endPoint * 0.66
else:
movePos = geometry.center
with utils.blockSignals(self._handleMove):
self._handleMove.setPosition(*movePos)
self.sigRegionChanged.emit()
def getGeometry(self):
"""Returns a tuple containing the geometry of this ROI
It is a symmetric function of :meth:`setGeometry`.
If `startAngle` is smaller than `endAngle` the rotation is clockwise,
else the rotation is anticlockwise.
:rtype: Tuple[numpy.ndarray,float,float,float,float]
:raise ValueError: In case the ROI can't be represented as section of
a circle
"""
geometry = self._geometry
if geometry.center is None:
raise ValueError("This ROI can't be represented as a section of circle")
return geometry.center, self.getInnerRadius(), self.getOuterRadius(), geometry.startAngle, geometry.endAngle
def isClosed(self):
"""Returns true if the arc is a closed shape, like a circle or a donut.
:rtype: bool
"""
return self._geometry.isClosed()
def getCenter(self):
"""Returns the center of the circle used to draw arcs of this ROI.
This center is usually outside the shape itself.
:rtype: numpy.ndarray
"""
return self._geometry.center
def getStartAngle(self):
"""Returns the angle of the start of the section of this ROI (in radian).
If `startAngle` is smaller than `endAngle` the rotation is clockwise,
else the rotation is anticlockwise.
:rtype: float
"""
return self._geometry.startAngle
def getEndAngle(self):
"""Returns the angle of the end of the section of this ROI (in radian).
If `startAngle` is smaller than `endAngle` the rotation is clockwise,
else the rotation is anticlockwise.
:rtype: float
"""
return self._geometry.endAngle
def getInnerRadius(self):
"""Returns the radius of the smaller arc used to draw this ROI.
:rtype: float
"""
geometry = self._geometry
radius = geometry.radius - geometry.weight * 0.5
if radius < 0:
radius = 0
return radius
def getOuterRadius(self):
"""Returns the radius of the bigger arc used to draw this ROI.
:rtype: float
"""
geometry = self._geometry
radius = geometry.radius + geometry.weight * 0.5
return radius
def setGeometry(self, center, innerRadius, outerRadius, startAngle, endAngle):
"""
Set the geometry of this arc.
:param numpy.ndarray center: Center of the circle.
:param float innerRadius: Radius of the smaller arc of the section.
:param float outerRadius: Radius of the bigger arc of the section.
It has to be bigger than `innerRadius`
:param float startAngle: Location of the start of the section (in radian)
:param float endAngle: Location of the end of the section (in radian).
If `startAngle` is smaller than `endAngle` the rotation is clockwise,
else the rotation is anticlockwise.
"""
assert innerRadius <= outerRadius
assert numpy.abs(startAngle - endAngle) <= 2 * numpy.pi
center = numpy.array(center)
radius = (innerRadius + outerRadius) * 0.5
weight = outerRadius - innerRadius
vector = numpy.array([numpy.cos(startAngle), numpy.sin(startAngle)])
startPoint = center + vector * radius
vector = numpy.array([numpy.cos(endAngle), numpy.sin(endAngle)])
endPoint = center + vector * radius
geometry = _ArcGeometry(center, startPoint, endPoint,
radius, weight,
startAngle, endAngle, closed=None)
self._geometry = geometry
self._updateHandles()
@docstring(HandleBasedROI)
def contains(self, position):
# first check distance, fastest
center = self.getCenter()
distance = numpy.sqrt((position[1] - center[1]) ** 2 + ((position[0] - center[0])) ** 2)
is_in_distance = self.getInnerRadius() <= distance <= self.getOuterRadius()
if not is_in_distance:
return False
rel_pos = position[1] - center[1], position[0] - center[0]
angle = numpy.arctan2(*rel_pos)
# angle is inside [-pi, pi]
# Normalize the start angle between [-pi, pi]
# with a positive angle range
start_angle = self.getStartAngle()
end_angle = self.getEndAngle()
azim_range = end_angle - start_angle
if azim_range < 0:
start_angle = end_angle
azim_range = -azim_range
start_angle = numpy.mod(start_angle + numpy.pi, 2 * numpy.pi) - numpy.pi
if angle < start_angle:
angle += 2 * numpy.pi
return start_angle <= angle <= start_angle + azim_range
def translate(self, x, y):
self._geometry = self._geometry.translated(x, y)
self._updateHandles()
def _arcCurvatureMarkerConstraint(self, x, y):
"""Curvature marker remains on perpendicular bisector"""
geometry = self._geometry
if geometry.center is None:
center = (geometry.startPoint + geometry.endPoint) * 0.5
vector = geometry.startPoint - geometry.endPoint
vector = numpy.array((vector[1], -vector[0]))
vdist = numpy.linalg.norm(vector)
if vdist != 0:
normal = numpy.array((vector[1], -vector[0])) / vdist
else:
normal = numpy.array((0, 0))
else:
if geometry.isClosed():
midAngle = geometry.startAngle + numpy.pi * 0.5
else:
midAngle = (geometry.startAngle + geometry.endAngle) * 0.5
normal = numpy.array([numpy.cos(midAngle), numpy.sin(midAngle)])
center = geometry.center
dist = numpy.dot(normal, (numpy.array((x, y)) - center))
dist = numpy.clip(dist, geometry.radius, geometry.radius * 2)
x, y = center + dist * normal
return x, y
@staticmethod
def _circleEquation(pt1, pt2, pt3):
"""Circle equation from 3 (x, y) points
:return: Position of the center of the circle and the radius
:rtype: Tuple[Tuple[float,float],float]
"""
x, y, z = complex(*pt1), complex(*pt2), complex(*pt3)
w = z - x
w /= y - x
c = (x - y) * (w - abs(w) ** 2) / 2j / w.imag - x
return numpy.array((-c.real, -c.imag)), abs(c + x)
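# Worked example: pt1=(0, 0), pt2=(1, 1), pt3=(2, 0) gives c = -1+0j,
# hence center (1.0, 0.0) and radius abs(c + x) = 1.0.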
from __future__ import division
import numpy as np
import copy
from pysph.base.nnps import LinkedListNNPS
from pysph.base.utils import get_particle_array, get_particle_array_wcsph
from cyarray.api import UIntArray
from numpy.linalg import norm, matrix_power
from pysph.sph.equation import Equation
from pysph.tools.sph_evaluator import SPHEvaluator
from pysph.base.particle_array import ParticleArray
def distance(point1, point2=np.array([0.0, 0.0, 0.0])):
return np.sqrt(sum((point1 - point2) * (point1 - point2)))
def distance_2d(point1, point2=np.array([0.0, 0.0])):
return np.sqrt(sum((point1 - point2) * (point1 - point2)))
def matrix_exp(matrix):
"""
Exponential of a matrix.
Finds the exponential of a square matrix of any order using the
formula exp(A) = I + (A/1!) + (A**2/2!) + (A**3/3!) + .........
Parameters
----------
matrix : numpy matrix of order nxn (square) filled with numbers
Returns
-------
result : numpy matrix of the same order
Examples
--------
>>>A = np.matrix([[1, 2],[2, 3]])
>>>matrix_exp(A)
matrix([[19.68002699, 30.56514746],
[30.56514746, 50.24517445]])
>>>B = np.matrix([[0, 0],[0, 0]])
>>>matrix_exp(B)
matrix([[1., 0.],
[0., 1.]])
"""
matrix = np.asarray(matrix)
tol = 1.0e-16
result = matrix_power(matrix, 0)
n = 1
condition = True
while condition:
adding = matrix_power(matrix, n) / (1.0 * np.math.factorial(n))
result += adding
residue = np.sqrt(np.sum(np.square(adding)) /
np.sum(np.square(result)))
condition = (residue > tol)
n += 1
return result
def extrude(x, y, dx=0.01, extrude_dist=1.0, z_center=0.0):
"""
Extrudes a 2d geometry.
Takes a 2d geometry with x, y values and extrudes it in z direction by the
amount extrude_dist with z_center as center
Parameters
----------
x : 1d array object with numbers
y : 1d array object with numbers
dx : a number
extrude_dist : a number
z_center : a number
x, y should be of the same length and no x, y pair should be the same
Returns
-------
x_new : 1d numpy array object with new x values
y_new : 1d numpy array object with new y values
z_new : 1d numpy array object with z values
x_new, y_new, z_new are of the same length
Examples
--------
>>>x = np.array([0.0])
>>>y = np.array([0.0])
>>>extrude(x, y, 0.1, 0.2, 0.0)
(array([ 0., 0., 0.]),
array([ 0., 0., 0.]),
array([-0.1, 0., 0.1]))
"""
z = np.arange(z_center - extrude_dist / 2.,
z_center + (extrude_dist + dx) / 2., dx)
x_new = np.tile(np.asarray(x), len(z))
y_new = np.tile(np.asarray(y), len(z))
z_new = np.repeat(z, len(x))
return x_new, y_new, z_new
def translate(x, y, z, x_translate=0.0, y_translate=0.0, z_translate=0.0):
"""
Translates a set of points in 3d Cartesian space.
Takes set of points and translates each and every point by some
mentioned amount in all the 3 directions.
Parameters
----------
x : 1d array object with numbers
y : 1d array object with numbers
z : 1d array object with numbers
x_translate : a number
y_translate : a number
z_translate : a number
Returns
-------
x_new : 1d numpy array object with new x values
y_new : 1d numpy array object with new y values
z_new : 1d numpy array object with new z values
Examples
--------
>>>x = np.array([0.0, 1.0, 2.0])
>>>y = np.array([-1.0, 0.0, 1.5])
>>>z = np.array([0.5, -1.5, 0.0])
>>>translate(x, y, z, 1.0, -0.5, 2.0)
(array([ 1., 2., 3.]), array([-1.5, -0.5, 1.]), array([2.5, 0.5, 2.]))
"""
x_new = np.asarray(x) + x_translate
y_new = np.asarray(y) + y_translate
z_new = np.asarray(z) + z_translate
return x_new, y_new, z_new
def rotate(x, y, z, axis=np.array([0.0, 0.0, 1.0]), angle=90.0):
"""
Rotates set of points in 3d cartisean space.
Takes set of points and rotates each point with some angle w.r.t
a mentioned axis.
Parameters
----------
x : 1d array object with numbers
y : 1d array object with numbers
z : 1d array object with numbers
axis : 1d array with 3 numbers
angle(in degrees) : number
Returns
-------
x_new : 1d numpy array object with new x values
y_new : 1d numpy array object with new y values
z_new : 1d numpy array object with new z values
Examples
--------
>>>x = np.array([0.0, 1.0, 2.0])
>>>y = np.array([-1.0, 0.0, 1.5])
>>>z = np.array([0.5, -1.5, 0.0])
>>>axis = np.array([0.0, 0.0, 1.0])
>>>rotate(x, y, z, axis, 90.0)
(array([ 1.00000000e+00, 4.25628483e-17, -1.50000000e+00]),
array([-4.25628483e-17, 1.00000000e+00, 2.00000000e+00]),
array([ 0.5, -1.5, 0. ]))
"""
theta = angle * np.pi / 180.0
unit_vector = np.asarray(axis) / norm(np.asarray(axis))
matrix = np.cross(np.eye(3), unit_vector * theta)
rotation_matrix = matrix_exp(matrix)
new_points = []
for xi, yi, zi in zip(np.asarray(x), np.asarray(y), np.asarray(z)):
point = np.array([xi, yi, zi])
new = np.dot(rotation_matrix, point)
new_points.append(new)
new_points = np.array(new_points)
x_new = new_points[:, 0]
y_new = new_points[:, 1]
z_new = new_points[:, 2]
return x_new, y_new, z_new
def get_2d_wall(dx=0.01, center=np.array([0.0, 0.0]), length=1.0,
num_layers=1, up=True):
"""
Generates a 2d wall which is parallel to x-axis. The wall can be
rotated parallel to any axis using the rotate function. A 3d wall
can also be generated using the extrude function after generating
particles using this function.
^
|
|
y|*******************
| wall particles
|
|____________________>
x
Parameters
----------
dx : a number which is the spacing required
center : 1d array like object which is the center of wall
length : a number which is the length of the wall
num_layers : Number of layers for the wall
up : True if the layers have to created on top of base wall
Returns
-------
x : 1d numpy array with x coordinates of the wall
y : 1d numpy array with y coordinates of the wall
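Examples
--------
A minimal sketch with the default center:
>>>get_2d_wall(dx=0.5, length=1.0, num_layers=2)
(array([-0.5,  0. ,  0.5, -0.5,  0. ,  0.5]),
 array([0. , 0. , 0. , 0.5, 0.5, 0.5]))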
"""
x = np.arange(-length / 2., length / 2. + dx, dx) + center[0]
y = np.ones_like(x) * center[1]
value = 1 if up else -1
for i in range(1, num_layers):
y1 = np.ones_like(x) * center[1] + value * i * dx
y = np.concatenate([y, y1])
return np.tile(x, num_layers), y
def get_2d_tank(dx=0.05, base_center=np.array([0.0, 0.0]), length=1.0,
height=1.0, num_layers=1, outside=True, staggered=False,
top=False):
"""
Generates an open 2d tank with the base parallel to x-axis and the side
walls parallel to y-axis. The tank can be rotated in any direction using
the rotate function. A 3d tank can be generated using the extrude function.
^
|* *
|* 2d tank *
y|* particles *
|* *
|* * * * * * * * *
| base
|____________________>
x
Parameters
----------
dx : a number which is the spacing required
base_center : 1d array like object which is the center of base wall
length : a number which is the length of the base
height : a number which is the length of the side wall
num_layers : Number of layers for the tank
outside : A boolean value which decides if the layers are inside or outside
staggered : A boolean value which decides if the layers are staggered or not
top : A boolean value which decides if the top is present or not
Returns
-------
x : 1d numpy array with x coordinates of the tank
y : 1d numpy array with y coordinates of the tank
"""
dy = dx
fac = 1 if outside else 0
if staggered:
dx = dx/2
start = fac*(1 - num_layers)*dx
end = fac*num_layers*dx + (1 - fac) * dx
x, y = np.mgrid[start:length+end:dx, start:height+end:dy]
topset = 0 if top else 10*height
if staggered:
topset += dx
y[1::2] += dx
offset = 0 if outside else (num_layers-1)*dx
cond = ~((x > offset) & (x < length-offset) &
(y > offset) & (y < height+topset-offset))
return x[cond] + base_center[0] - length/2, y[cond] + base_center[1]
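# Example call (illustrative): a 2 m long, 1 m high open tank with two
# particle layers grown outward from the walls:
# x, y = get_2d_tank(dx=0.05, length=2.0, height=1.0, num_layers=2)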
def get_2d_circle(dx=0.01, r=0.5, center=np.array([0.0, 0.0])):
"""
Generates a completely filled 2d circular area.
Parameters
----------
dx : a number which is the spacing required
r : a number which is the radius of the circle
center : 1d array like object which is the center of the circle
Returns
-------
x : 1d numpy array with x coordinates of the circle particles
y : 1d numpy array with y coordinates of the circle particles
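Examples
--------
Values follow directly from the grid condition above:
>>>get_2d_circle(dx=0.5, r=0.5)
(array([-0.5,  0. ,  0. ,  0. ,  0.5]),
 array([ 0. , -0.5,  0. ,  0.5,  0. ]))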
"""
N = int(2.0 * r / dx) + 1
x, y = np.mgrid[-r:r:N * 1j, -r:r:N * 1j]
x, y = np.ravel(x), np.ravel(y)
condition = (x * x + y * y <= r * r)
x, y = x[condition], y[condition]
return x + center[0], y + center[1]
def get_2d_hollow_circle(dx=0.01, r=1.0, center=np.array([0.0, 0.0]),
num_layers=2, inside=True):
"""
Generates a hollow 2d circle with some number of layers either on the
inside or on the outside of the body which is taken as an argument
Parameters
----------
dx : a number which is the spacing required
r : a number which is the radius of the circle
center : 1d array like object which is the center of the circle
num_layers : a number (int)
inside : boolean (True or False). If this is True then the layers
are generated inside the circle
Returns
-------
x : 1d numpy array with x coordinates of the circle particles
y : 1d numpy array with y coordinates of the circle particles
"""
r_grid = r + dx * num_layers
N = int(2.0 * r_grid / dx) + 1
x, y = np.mgrid[-r_grid:r_grid:N * 1j, -r_grid:r_grid:N * 1j]
x, y = np.ravel(x), np.ravel(y)
if inside:
cond1 = (x * x + y * y <= r * r)
cond2 = (x * x + y * y >= (r - num_layers * dx)**2)
else:
cond1 = (x * x + y * y >= r * r)
cond2 = (x * x + y * y <= (r + num_layers * dx)**2)
cond = cond1 & cond2
x, y = x[cond], y[cond]
return x + center[0], y + center[1]
def get_3d_hollow_cylinder(dx=0.01, r=0.5, length=1.0,
center=np.array([0.0, 0.0, 0.0])):
import numpy as np
import cv2
import os
import torch.utils.data as data
import math
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
from opts import opts
import matplotlib.pyplot as plt
class CenterLandmarkDataset(data.Dataset):
def get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
#loadImgs(ids=[img_id]) returns a list of length 1
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
cropped = False
if self.split == 'train':
if np.random.random() < 1:
cropped = True
file_name = file_name.split('.')[0]+'crop.jpg'
img_path = os.path.join(self.img_dir, file_name)
if self.split == 'val':
cropped = True
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
rotted = False
# input_res is max(input_h, input_w), input is the size of original img
if np.random.random() < self.opts.keep_inp_res_prob and max((height | 127) + 1, (width | 127) + 1) < 1024:
self.opts.input_h = (height | 127) + 1
self.opts.input_w = (width | 127) + 1
self.opts.output_h = self.opts.input_h // self.opts.down_ratio
self.opts.output_w = self.opts.input_w // self.opts.down_ratio
self.opts.input_res = max(self.opts.input_h, self.opts.input_w)
self.opts.output_res = max(self.opts.output_h, self.opts.output_w)
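# (x | 127) + 1 rounds x up to the next multiple of 128 (e.g. 480 -> 512,
# 513 -> 640), keeping the resized input divisible by the network's
# down-sampling ratio.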
trans_input = get_affine_transform(
c, s, rot, [self.opts.input_res, self.opts.input_res])
inp = cv2.warpAffine(img, trans_input,
(self.opts.input_res, self.opts.input_res),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
inp = (inp - self.mean) / self.std
#change data shape to [3, input_size, input_size]
inp = inp.transpose(2, 0, 1)
#output_res is max(output_h, output_w), output is the size after down sampling
output_res = self.opts.output_res
num_joints = self.num_joints
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
dense_kps = np.zeros((num_joints, 2, output_res, output_res), dtype=np.float32)
dense_kps_mask = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
kps = np.zeros((self.max_objs, 2*num_joints), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)
hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64)
hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)
draw_gaussian = draw_msra_gaussian if self.opts.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
if cropped:
bbox = np.array(ann['bbox'])
else:
bbox = np.array(ann['org_bbox'])
from __future__ import absolute_import
import copy
import logging
import numpy as np
import xarray as xr
import scipy.integrate
import scipy.interpolate
from collections import OrderedDict
try:
import pyproj
HAS_PYPROJ = True
except ImportError:
HAS_PYPROJ = False
from oceanwaves.utils import *
from oceanwaves.units import simplify
from oceanwaves.plot import OceanWavesPlotMethods
from oceanwaves.spectral import *
from oceanwaves.swan import *
from oceanwaves.datawell import *
from oceanwaves.wavedroid import *
# initialize logger
logger = logging.getLogger(__name__)
class OceanWaves(xr.Dataset):
'''Class to store (spectral) data of ocean waves
The class is a specific variant of an xarray.Dataset that defines
any selection of the following dimensions: time, location,
frequency and direction. The dependent variable is some form of
wave energy.
The class has methods to compute spectral moments and derived wave
properties, like the significant wave height and various spectral
wave periods. In addition the peak wave period and peak wave
directions can be computed using dedicated methods.
The class automatically converts locations from a local coordinate
reference system to lat/lon coordinates, if the local coordinate
reference system is specified.
The class interprets combined variable units and simplifies the
result to practical entities.
The class provides two plotting routines: 1) plotting of spectral
wave data in a raster of subplots and 2) plotting of spectral wave
data on a map.
The class supports all convenient properties of an xarray.Dataset,
like writing to netCDF or converting to pandas.DataFrame.
TODO:
* improve plotting routines
* add phase functions to use with tides: phase estimates, phase
interpolation, etc.
'''
SwanSpcReader = SwanSpcReader()
SwanTableReader = SwanTableReader()
Swan2DReader = Swan2DReader()
from_datawell = DatawellReader()
from_wavedroid = WaveDroidReader()
def __init__(self, time=None, location=None, frequency=None,
direction=None, energy=None, spreading=None,
time_units='s', location_units='m',
frequency_units='Hz', direction_units='deg',
energy_units='m^2/Hz', spreading_units='deg',
time_var='time', location_var='location',
frequency_var='frequency', direction_var='direction',
energy_var='energy', spreading_var='spreading',
frequency_convention='absolute',
direction_convention='nautical',
spreading_convention='cosine', spectral=True,
directional=True, attrs={}, crs=None, **kwargs):
'''Initialize class
Sets dimensions, converts coordinates and fills the dataset,
if data is provided.
Parameters
----------
time : iterable, optional
Time coordinates, each item can be a datetime object or
float
location : iterable of 2-tuples, optional
Location coordinates, each item is a 2-tuple with x- and
y-coordinates
frequency : iterable, optional
Frequency coordinates
direction : iterable, optional
Direction coordinates
energy : matrix, optional
Wave energy
time_units : str, optional
Units of time coordinates (default: s)
location_units : str, optional
Units of location coordinates (default: m)
frequency_units : str, optional
Units of frequency coordinates (default: Hz)
direction_units : str, optional
Units of direction coordinates (default: deg)
energy_units : str, optional
Units of wave energy (default: m^2/Hz)
time_var : str, optional
Name of time variable (default: time)
location_var : str, optional
Name of location variable (default: location)
frequency_var : str, optional
Name of frequency variable (default: frequency)
direction_var : str, optional
Name of direction variable (default: direction)
energy_var : str, optional
Name of wave energy variable (default: energy)
frequency_convention : str, optional
Convention of frequency definition (default: absolute)
direction_convention : str, optional
Convention of direction definition (default: nautical)
attrs : dict-like, optional
Global attributes
crs : str, optional
Proj4 specification of local coordinate reference system
kwargs : dict, optional
Additional options passed to the xarray.Dataset
initialization method
See Also
--------
oceanwaves.OceanWaves.reinitialize
'''
dims = []
coords = OrderedDict()
data_vars = OrderedDict()
# simplify dimensions
time = np.asarray(time)
location = np.asarray(location)
frequency = np.asarray(frequency, dtype=float)
direction = np.asarray(direction, dtype=float)
spreading = np.asarray(spreading, dtype=float)
energy = np.asarray(energy, dtype=float)
# simplify units
time_units = simplify(time_units)
location_units = simplify(location_units)
frequency_units = simplify(frequency_units)
direction_units = simplify(direction_units)
energy_units = simplify(energy_units)
# determine object dimensions
if self._isvalid(time):
dims.append(time_var)
coords[time_var] = xr.Variable(
time_var,
time
)
# only set time units if given. otherwise a datetime
# object is assumed that is encoded by xarray. setting
# units manually in that case would raise an exception if
# the dataset is written to CF-compatible netCDF.
if time_units is not None and time_units != '':
coords[time_var].attrs.update(dict(units=time_units))
if self._isvalid(location):
dims.append(location_var)
coords[location_var] = xr.Variable(
location_var,
np.arange(len(location))
)
x, y = list(zip(*location))
coords['%s_x' % location_var] = xr.Variable(
location_var,
np.asarray(x),
attrs=dict(units=location_units)
)
coords['%s_y' % location_var] = xr.Variable(
location_var,
np.asarray(y),
attrs=dict(units=location_units)
)
coords['%s_lat' % location_var] = xr.Variable(
location_var,
np.asarray(x) + np.nan,
attrs=dict(units='degN')
)
coords['%s_lon' % location_var] = xr.Variable(
location_var,
np.asarray(y) + np.nan,
attrs=dict(units='degE')
)
if self._isvalid(frequency, mask=frequency>0) and spectral:
dims.append(frequency_var)
coords[frequency_var] = xr.Variable(
frequency_var,
frequency[frequency>0],
attrs=dict(units=frequency_units)
)
if self._isvalid(direction) and directional:
dims.append(direction_var)
coords[direction_var] = xr.Variable(
direction_var,
direction,
attrs=dict(units=direction_units)
)
# determine object shape
shp = tuple([len(c) for k, c in coords.items() if k in dims])
# initialize energy variable
data_vars[energy_var] = xr.DataArray(
np.nan + np.zeros(shp),
dims=dims,
coords=coords,
attrs=dict(units=energy_units)
)
# store parameterized frequencies
if not spectral:
if self._isvalid(frequency):
data_vars[frequency_var] = xr.DataArray(
frequency,
dims=dims,
coords=coords,
attrs=dict(units=frequency_units)
)
# store parameterized directions
if not directional:
if self._isvalid(direction):
data_vars[direction_var] = xr.DataArray(
direction,
dims=dims,
coords=coords,
attrs=dict(units=direction_units)
)
if self._isvalid(spreading):
data_vars[spreading_var] = xr.DataArray(
spreading,
dims=dims,
coords=coords,
attrs=dict(units=spreading_units)
)
# collect global attributes
attrs.update(dict(
_init=kwargs.copy(),
_crs=crs,
_names=dict(
time = time_var,
location = location_var,
frequency = frequency_var,
direction = direction_var,
spreading = spreading_var,
energy = energy_var
),
_units=dict(
time = time_units,
location = location_units,
frequency = frequency_units,
direction = direction_units,
energy = energy_units
),
_conventions=dict(
frequency = frequency_convention,
direction = direction_convention,
spreading = spreading_convention
)
))
# initialize empty object
super(OceanWaves, self).__init__(
data_vars=data_vars,
coords=coords,
attrs=attrs,
**kwargs
)
# set wave energy
if self._isvalid(energy):
self['_energy'] = dims, energy.reshape(shp)
# convert coordinates
self.convert_coordinates(crs)
@classmethod
def from_dataset(cls, dataset, *args, **kwargs):
'''Initialize class from xarray.Dataset or OceanWaves object
Parameters
----------
dataset : xarray.Dataset or Oceanwaves
Base object
'''
if isinstance(dataset, OceanWaves):
kwargs = dataset._extract_initialization_args(**kwargs)
elif isinstance(dataset, xr.Dataset):
obj = OceanWaves(*args, **kwargs)
dataset = obj._expand_locations(dataset)
obj = obj.merge(dataset)
kwargs = obj._extract_initialization_args(**kwargs)
obj = cls(*args, **kwargs)
obj.merge(dataset, inplace=True)
return obj
def reinitialize(self, **kwargs):
'''Reinitializes current object with modified parameters
Gathers current object's initialization settings and updates
them with the given initialization options. Then initializes a
new object with the resulting option set. See for all
supported options the initialization method of this class.
Parameters
----------
kwargs : dict
Keyword/value pairs with initialization options that need
to be overwritten
Returns
-------
OceanWaves
New OceanWaves object
'''
settings = self._extract_initialization_args(**kwargs)
return OceanWaves(**settings).restore(self)
def iterdim(self, dim):
'''Iterate over given dimension
Parameters
----------
dim : str
Name of dimension
'''
k = self._key_lookup(dim)
if k in self.dims.keys():
for i in range(len(self[k])):
yield self.isel(**{k:i})
else:
yield self
def _extract_initialization_args(self, **kwargs):
'''Return updated initialization settings
Parameters
----------
kwargs : dict
Keyword/value pairs with initialization options that need
to be overwritten
Returns
-------
dict
Dictionary with initialization arguments
'''
settings = dict(crs = self.attrs['_crs'],
attrs = dict([
(k, v)
for k, v in self.attrs.items()
if not k.startswith('_')
]))
# add dimensions
for dim in ['direction', 'frequency', 'time']:
if self.has_dimension(dim):
k = self._key_lookup('_%s' % dim)
v = self.coords[k]
settings[dim] = v.values
if 'units' in v.attrs:
settings['%s_units' % dim] = v.attrs['units']
# add locations
if self.has_dimension('location'):
k = self._key_lookup('_location')
x = self.variables['%s_x' % k].values
y = self.variables['%s_y' % k].values
settings['location'] = list(zip(x, y))
settings['location_units'] = self.variables['%s_x' % k].attrs['units']
# add energy
k = self._key_lookup('_energy')
v = self.variables[k]
settings['energy'] = v.values
if 'units' in v.attrs:
settings['energy_units'] = v.attrs['units']
# add variable names
for k, v in self.attrs['_names'].items():
settings['%s_var' % k] = v
# add additional arguments
settings.update(self.attrs['_init'])
settings.update(kwargs)
return settings
def Hm0(self, f_min=0, f_max=np.inf):
'''Compute significant wave height based on zeroth order moment
Parameters
----------
f_min : float
Minimum frequency to include in moment
f_max : float
Maximum frequency to include in moment
Returns
-------
H : xarray.DataArray
Significant wave height at each point in time and location
in the dataset
'''
# compute moments
m0 = self.moment(0, f_min=f_min, f_max=f_max)
# compute wave height
H = 4. * np.sqrt(m0)
# determine units
units = '(%s)^0.5' % m0.attrs['units']
H.attrs['units'] = simplify(units)
return H
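# A minimal sketch of the underlying relation (synthetic spectrum, not from
# the original source): for a flat spectrum E(f) = 1 m^2/Hz over 0..1 Hz,
# m0 = 1 m^2 and Hm0 = 4 * sqrt(m0) = 4 m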
def Tm01(self):
'''Compute wave period based on first order moment
Returns
-------
T : xarray.DataArray
Spectral wave period at each point in time and location in
the dataset
'''
# compute moments
m0 = self.moment(0)
m1 = self.moment(1)
# compute wave period
T = m0/m1
# determine units
units = '(%s)/(%s)' % (m0.attrs['units'], m1.attrs['units'])
T.attrs['units'] = simplify(units)
return T
def Tm02(self):
'''Compute wave period based on second order moment
Returns
-------
T : xarray.DataArray
Spectral wave period at each point in time and location in
the dataset
'''
# compute moments
m0 = self.moment(0)
m2 = self.moment(2)
# compute wave period
T = np.sqrt(m0/m2)
# determine units
units = '((%s)/(%s))^0.5' % (m0.attrs['units'], m2.attrs['units'])
T.attrs['units'] = simplify(units)
return T
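# The two spectral periods relate as Tm01 = m0/m1 and Tm02 = sqrt(m0/m2);
# for a narrow-banded spectrum peaked at f0 both reduce to roughly 1/f0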
def Tp(self):
'''Alias for :meth:`oceanwaves.OceanWaves.peak_period`
'''
return self.peak_period()
def peak_period(self):
'''Compute peak wave period
Returns
-------
T : xarray.DataArray
Peak wave period at each point in time and location in the
dataset
'''
if self.has_dimension('frequency', raise_error=True):
coords = OrderedDict(self.coords)
k = self._key_lookup('_energy')
E = self.variables[k]
dims = list(E.dims)
# determine peak frequencies
k = self._key_lookup('_frequency')
f = coords.pop(k).values
ix = E.argmax(dim=k).values
peaks = 1. / f[ix.flatten()].reshape(ix.shape)
dims.remove(k)
# determine units
units = '1/(%s)' % self.variables[k].attrs['units']
units = simplify(units)
return xr.DataArray(peaks, coords=coords, dims=dims,
attrs=dict(units=units))
def peak_direction(self):
'''Compute peak wave direction
Returns
-------
theta : xarray.DataArray
Peak wave direction at each point in time and location in
the dataset
'''
if self.has_dimension('direction', raise_error=True):
coords = OrderedDict(self.coords)
k = self._key_lookup('_energy')
E = self.variables[k]
dims = list(E.dims)
# determine peak directions
k = self._key_lookup('_direction')
theta = coords.pop(k).values
ix = E.argmax(dim=k).values
peaks = theta[ix.flatten()].reshape(ix.shape)
dims.remove(k)
# determine units
units = self.variables[k].attrs['units']
units = simplify(units)
return xr.DataArray(peaks, coords=coords, dims=dims,
attrs=dict(units=units))
def directional_spreading(self):
'''Estimate directional spreading
Estimate directional spreading by assuming a gaussian
distribution and computing the variance of the directional
spreading. The directional spreading is assumed to be
``3000/var``.
Notes
-----
This estimate is inaccurate and should be improved based on
the cosine model.
'''
if self.has_dimension('direction', raise_error=True):
coords = OrderedDict(self.coords)
k = self._key_lookup('_energy')
E = self.variables[k]
dims = list(E.dims)
# determine peak directions
k = self._key_lookup('_direction')
theta = coords.pop(k).values
ix_direction = dims.index(k)
dims.remove(k)
# determine directional spreading
E /= expand_and_repeat(
np.trapz(E, theta, axis=ix_direction),
repeat=len(theta),
expand_dims=ix_direction
)
m1 = np.trapz(theta * E, theta)
m2 = np.trapz(theta**2. * E, theta)
var = m2 - m1**2.
spreading = np.round(3000./var / 5.) * 5.
# determine units
units = self.variables[k].attrs['units']
units = simplify(units)
return xr.DataArray(spreading, coords=coords, dims=dims,
attrs=dict(units=units))
def moment(self, n, f_min=0., f_max=np.inf):
'''Compute nth order moment of wave spectrum
Parameters
----------
n : int
Order of moment
f_min : float
Minimum frequency to include in moment
f_max : float
Maximum frequency to include in moment
Returns
-------
m : xarray.DataArray
nth order moment of the wave spectrum at each point in
time and location in the dataset
'''
if self.has_dimension('frequency', raise_error=True):
coords = OrderedDict(self.coords)
k = self._key_lookup('_energy')
E = self.variables[k]
dims = list(E.dims)
# integrate frequencies
k = self._key_lookup('_frequency')
f = coords.pop(k).values
ix_frequency = dims.index(k)
f_mtx = expand_and_repeat(
f,
shape=E.values.shape,
exist_dims=ix_frequency
)
dims.remove(k)
if f_min == 0. and f_max == np.inf:
m = np.trapz(E.values * f_mtx**n, f_mtx, axis=ix_frequency)
else:
if n != 0:
logger.warning('Computing %d-order moment using a frequency range; '
'are you sure you know what you are doing?', n)
# integrate range of frequencies
f_min = np.maximum(f_min, np.min(f))
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
from contomo import phantom
from contomo import projected_advection_pde
from contomo import flow_model
from contomo import velocity_solver
from contomo import basis
from contomo import ray_model
from contomo import sinogram_interpolator
from contomo import utils
ph = phantom.Spheres.load( "/home/axel/Downloads/spherephantom.phantom" )
dx = ph.detector_pixel_size
sinograms, sample_times, angles = ph.get_sorted_sinograms_times_and_angles(labels="Dynamic scan")
si = sinogram_interpolator.SinogramInterpolator( sample_times, sinograms, smoothness=0, order=2)
initial_volume = np.load("/home/axel/Downloads/intermediate_volume_0000.npy")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 09:18:44 2020
@author: <NAME> <EMAIL>
@author: matheustorquato <EMAIL>
"""
import functools, os
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import pandas as pd
import logging
from functools import reduce
import scipy.integrate as spi
#from pyswarms.single.global_best import GlobalBestPSO
import pyswarms as ps
from pyswarms.backend.topology import Star
from pyswarms.utils.plotters import plot_cost_history
from itertools import repeat
class SIR:
''' SIR Model'''
def __init__(self,tamanhoPop,numeroProcessadores=None):
self.N = tamanhoPop
self.numeroProcessadores = numeroProcessadores
def __cal_EDO(self,x,beta,gamma):
ND = len(x)-1
t_start = 0.0
t_end = ND
t_inc = 1
t_range = np.arange(t_start, t_end + t_inc, t_inc)
beta = np.array(beta)
gamma = np.array(gamma)
def SIR_diff_eqs(INP, t, beta, gamma):
Y = np.zeros((3))
V = INP
Y[0] = - beta * V[0] * V[1] #S
Y[1] = beta * V[0] * V[1] - gamma * V[1] #I
Y[2] = gamma * V[1] #R
return Y
result_fit = spi.odeint(SIR_diff_eqs, (self.S0, self.I0,self.R0), t_range,
args=(beta, gamma))
S=result_fit[:, 0]*self.N
R=result_fit[:, 2]*self.N
I=result_fit[:, 1]*self.N
return S,I,R
def __cal_EDO_2(self,x,beta1,gamma,beta2,tempo):
ND = len(x)-1
t_start = 0.0
t_end = ND
t_inc = 1
t_range = np.arange(t_start, t_end + t_inc, t_inc)
def H(t):
h = 1.0/(1.0+ np.exp(-2.0*50*t))
return h
def beta(t,t1,b,b1):
beta = b*H(t1-t) + b1*H(t-t1)
return beta
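# H approximates a Heaviside step with a steep logistic, so beta(t)
# switches smoothly from b (for t < t1) to b1 (for t > t1) around day t1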
gamma = np.array(gamma)
def SIR_diff_eqs(INP, t, beta1, gamma,beta2,t1):
Y = np.zeros((3))
V = INP
Y[0] = - beta(t,t1,beta1,beta2) * V[0] * V[1] #S
Y[1] = beta(t,t1,beta1,beta2) * V[0] * V[1] - gamma * V[1] #I
Y[2] = gamma * V[1] #R
return Y
result_fit = spi.odeint(SIR_diff_eqs, (self.S0, self.I0,self.R0), t_range,
args=(beta1, gamma,beta2,tempo))
S=result_fit[:, 0]*self.N
R=result_fit[:, 2]*self.N
I=result_fit[:, 1]*self.N
return S,I,R
def objectiveFunction(self,coef,x ,y,stand_error):
tam2 = len(coef[:,0])
soma = np.zeros(tam2)
y = y*self.N
if stand_error:
if (self.beta_variavel) & (self.day_mudar==None):
for i in range(tam2):
S,I,R = self.__cal_EDO_2(x,coef[i,0],coef[i,1],coef[i,2],coef[i,3])
soma[i]= (((y-(I+R))/np.sqrt((I+R)+1))**2).mean()
elif self.beta_variavel:
for i in range(tam2):
S,I,R = self.__cal_EDO_2(x,coef[i,0],coef[i,1],coef[i,2],self.day_mudar)
soma[i]= (((y-(I+R))/np.sqrt((I+R)+1))**2).mean()
else:
for i in range(tam2):
S,I,R = self.__cal_EDO(x,coef[i,0],coef[i,1])
soma[i]= (((y-(I+R))/np.sqrt((I+R)+1))**2).mean()
else:
if (self.beta_variavel) & (self.day_mudar==None):
for i in range(tam2):
S,I,R = self.__cal_EDO_2(x,coef[i,0],coef[i,1],coef[i,2],coef[i,3])
soma[i]= (((y-(I+R)))**2).mean()
elif self.beta_variavel:
for i in range(tam2):
S,I,R = self.__cal_EDO_2(x,coef[i,0],coef[i,1],coef[i,2],self.day_mudar)
soma[i]= (((y-(I+R)))**2).mean()
else:
for i in range(tam2):
S,I,R = self.__cal_EDO(x,coef[i,0],coef[i,1])
soma[i]= (((y-(I+R)))**2).mean()
return soma
def fit(self, x,y , bound = ([0,1/21],[1,1/5]),stand_error=True, beta2=True,day_mudar = None,particles=50,itera=500,c1= 0.5, c2= 0.3, w = 0.9, k=3,p=1):
'''
x = days elapsed since the initial day 1
y = number of cases
bound = search interval for each parameter, where None = unbounded
bound => (list_of_lower_bounds, list_of_upper_bounds)
'''
self.beta_variavel = beta2
self.day_mudar = day_mudar
self.y = y
self.x = x
df = np.array(y)/self.N
self.I0 = df[0]
self.S0 = 1-self.I0
self.R0 = 0
options = {'c1': c1, 'c2': c2, 'w': w,'k':k,'p':p}
optimizer = None
if bound==None:
if (beta2) & (day_mudar==None):
optimizer = ps.single.LocalBestPSO(n_particles=particles, dimensions=4, options=options)
elif beta2:
optimizer = ps.single.LocalBestPSO(n_particles=particles, dimensions=3, options=options)
else:
optimizer = ps.single.LocalBestPSO(n_particles=particles, dimensions=2, options=options)
else:
if (beta2) & (day_mudar==None):
if len(bound[0])==2:
bound = (bound[0].copy(),bound[1].copy())
bound[0].append(bound[0][0])
bound[1].append(bound[1][0])
bound[0].append(x[4])
bound[1].append(x[-5])
bound[0][3] = x[4]
bound[1][3] = x[-5]
optimizer = ps.single.LocalBestPSO(n_particles=particles, dimensions=4, options=options,bounds=bound)
elif beta2:
if len(bound[0])==2:
bound = (bound[0].copy(),bound[1].copy())
bound[0].append(bound[0][1])
bound[1].append(bound[1][1])
optimizer = ps.single.LocalBestPSO(n_particles=particles, dimensions=3, options=options,bounds=bound)
else:
optimizer = ps.single.LocalBestPSO(n_particles=particles, dimensions=2, options=options,bounds=bound)
cost = pos = None
if beta2:
cost, pos = optimizer.optimize(self.objectiveFunction, itera, x = x,y=df,stand_error=stand_error,n_processes=self.numeroProcessadores)
else:
cost, pos = optimizer.optimize(self.objectiveFunction, itera, x = x,y=df,stand_error=stand_error,n_processes=self.numeroProcessadores)
self.beta = pos[0]
self.gamma = pos[1]
if beta2:
self.beta1 = pos[0]
self.gamma = pos[1]
self.beta2 = pos[2]
if day_mudar==None:
self.day_mudar = pos[3]
else:
self.day_mudar = day_mudar
self.rmse = cost
self.optimize = optimizer
def predict(self,x):
''' x = days elapsed since the initial day 1'''
if self.beta_variavel:
S,I,R = self.__cal_EDO_2(x,self.beta1,self.gamma,self.beta2,self.day_mudar)
else:
S,I,R = self.__cal_EDO(x,self.beta,self.gamma)
self.ypred = I+R
self.S = S
self.I = I
self.R = R
return self.ypred
def getResiduosQuadatico(self):
y = np.array(self.y)
ypred = np.array(self.ypred)
y = y[0:len(self.x)]
ypred = ypred[0:len(self.x)]
return (y - ypred)**2
def getReQuadPadronizado(self):
y = np.array(self.y)
ypred = np.array(self.ypred)
y = y[0:len(self.x)]
ypred = ypred[0:len(self.x)]
res = ((y - ypred)**2)/np.sqrt(ypred+1)
return res
def plotCost(self):
plot_cost_history(cost_history=self.optimize.cost_history)
plt.show()
def plot(self,local):
ypred = self.predict(self.x)
plt.plot(ypred,c='b',label='Predicted infected')
plt.plot(self.y,c='r',marker='o', markersize=3,label='Infected')
plt.legend(fontsize=15)
plt.title('COVID-19 dynamics - {}'.format(local),fontsize=20)
plt.ylabel('Confirmed cases',fontsize=15)
plt.xlabel('Days',fontsize=15)
plt.show()
def getCoef(self):
if self.beta_variavel:
return ['beta1','beta2','gamma','dia_mudanca'],[self.beta1,self.beta2,self.gamma,self.day_mudar]
return ['beta','gamma'], [self.beta,self.gamma]
def plotFit(self):
plt.style.use('seaborn-deep')
fig, axes = plt.subplots(figsize = (18,8))
try:
plt.plot(self.x, self.ypred, label = "Fitted", c = "red")
plt.scatter(self.x, self.y, label = "Observed", c = "blue")
plt.legend(loc='upper left')
plt.show()
except:
print("There is no predicted value")
class SEIRHUD:
''' SEIRHUD Model'''
def __init__(self,tamanhoPop,numeroProcessadores=None):
self.N = tamanhoPop
self.numeroProcessadores = numeroProcessadores
def __cal_EDO(self,x,beta,gammaH,gammaU,delta,h,ia0,is0,e0):
ND = len(x)-1
t_start = 0.0
t_end = ND
t_inc = 1
t_range = np.arange(t_start, t_end + t_inc, t_inc)
#!/usr/bin/python3.8
import numpy as np
from scipy.fft import fft,rfft,fftshift,irfft,ifftshift
from scipy.signal import gaussian,find_peaks
from scipy.optimize import curve_fit
from helper import zero_padding,chop_win # helpers
from constants import *
import helper
#%% Constants specific to this file
ALPHA = 40.0 # LEARNING RATE
EPSILON = 15.0 # finite difference derivative step
EPOCHS = 500 # Number of epochs
ITERATIONS = 3 # number of steps per epoch
N_DIM = 6
# N_SAMPLES_DIM = 2
# Hyper params for loss function
N_SAMPLES = 256*4
N_THETAS = 260
THRESH = 0.001
#%% more efficient chop win that downsamples first
def chop_win_downsample(w,n_samples=256,ntap=NTAP,lblock=LBLOCK):
# assumes ntap*lblock == len(w)
s = np.array(np.linspace(0,lblock*(1-1/n_samples),n_samples),dtype='int') # the sample indices
return np.reshape(w,(ntap,lblock)).T[s]
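# Illustrative shape (assuming NTAP=4 and LBLOCK=2048 as typical PFB
# settings; not confirmed by this file): chop_win_downsample(w, n_samples=256)
# returns a (256, 4) array of evenly spaced rows of the (LBLOCK, NTAP) grid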
#%% loss functions
def quantization_loss_1(window):
"""Loss function for quantization errors
Params : window function (e.g. SINC)
Returns : loss
the loss is the sum of recipricals of the eigenvalues
"""
w2d = chop_win(window,NTAP,LBLOCK)
w2d_padded = zero_padding(w2d)
ft = np.apply_along_axis(rfft,1,w2d_padded)
ft_abs_flat = np.abs(ft).flatten()+0.07
# add cnst term (0.07) so that nothing explodes in sum below
loss = np.sum(1/ft_abs_flat)
return loss # this brings current value to zero # quite bad
def q_loss_method_0(vals,thresh=THRESH):
return np.count_nonzero(vals<=thresh)
def q_loss_method_1(vals):
"""Loss function for quantization errors
Params :
vals : float[] -- array of samples of frequency eigenvalues (norm squared)
Returns :
loss : float -- the loss function
The loss is calculated below -- ni is the number of values below 0.i
"""
vsqrt = np.sqrt(vals)
n1 = np.count_nonzero(vsqrt<=0.1)
n2 = np.count_nonzero(vsqrt<=0.2) - n1
n3 = np.count_nonzero(vsqrt<=0.3) - n2 - n1
n4 = np.count_nonzero(vsqrt<=0.4) - n1 - n2 - n3
n5 = np.count_nonzero(vsqrt<=0.5) - n1 - n2 - n3 - n4
return n1 + 0.6*n2 + 0.3*n3 + 0.15*n4 + 0.1*n5
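# Illustrative check (values chosen for clarity, not from the source):
# vals = np.array([0.0025, 0.09, 1.0]) -> vsqrt = [0.05, 0.3, 1.0],
# so n1 = 1, n3 = 1 and the loss is 1 + 0.3*1 = 1.3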
def q_loss_method_2(vals):
"""Loss function for quantization errors
Params :
vals : float[] -- array of samples of frequency eigenvalues (norm squared)
Returns :
loss : float -- the loss function
The loss is calculated below -- ni is the number of values below 0.i
"""
n1 = np.count_nonzero(vals<=0.1)
n2 = np.count_nonzero(vals<=0.2) - n1
n3 = np.count_nonzero(vals<=0.3) - n2 - n1
n4 = np.count_nonzero(vals<=0.4) - n1 - n2 - n3
n5 = np.count_nonzero(vals<=0.5) - n1 - n2 - n3 - n4
return n1 + 0.6*n2 + 0.3*n3 + 0.15*n4 + 0.1*n5
def q_loss_method_3(vals):
return np.sum(1/(0.1+np.sqrt(vals)))
def q_loss_method_4(vals):
return np.sum(1/(0.1+vals))
def quantization_sample_single_fourier_value_at_pi(window,n_samples=128):
"""Loss function for quantizaion errors
Params : float[] window function (e.g. SINC)
Resturns : loss
The loss is evaluated as follows
chunk up the window
samples k chunks (where k is a number like 256 for instance)
evaluate y=|W(x)| at x=PI -- where W is the RDFT of [a,b,c,d,0,0,0,...,0] normalized so that x is between 0 and PI
return 1/(y+0.1) -- so 10 if it's 0, 5 if it's 0.1, 3. if it's 0.2, 2.5 if it's 0.3
also try return 1/(y**2+0.1)
"""
p2d = chop_win_downsample(window,n_samples=n_samples)
min_guess_ft = lambda arr: sum(arr * np.array((1,-1,1,-1)))# takes as input an array with four elements
vals = np.apply_along_axis(min_guess_ft,1,p2d)**2 # square or else negative values...
return vals
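# The (1, -1, 1, -1) weights are (-1)**k = e^{-i*pi*k} for taps k = 0..3,
# i.e. the 4-tap DFT evaluated at the Nyquist frequency x = PI, which is real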
def quantization_sample_fourier_values(window,n_samples=128,n_thetas=50,thetas_cust=None):
"""Loss function for quantization errors
Params, Returns same as above
The loss is evaluated as follows
Same as above except, evaluates multiple points on array
"""
# n_samples = 128 # the number of columns to sample in the
# n_thetas = 50 # number of thetas to sample from
sin = np.sin
cos = np.cos
p2d = chop_win_downsample(window,n_samples=n_samples)
symmetrise = lambda m: m + m.T + np.diag((1,1,1,1)) # fills diagonal with ones
def matrix(t):
m = np.array(((0,cos(t),cos(2*t),cos(3*t)),
(0,0,cos(t)*cos(2*t)+sin(t)*sin(2*t),cos(t)*cos(3*t)+sin(t)*sin(3*t)),
(0,0,0,cos(2*t)*cos(3*t)+sin(3*t)),
(0,0,0,0)))
return symmetrise(m)
if type(thetas_cust)!=type(None):theta_arr = thetas_cust
else:theta_arr= np.linspace(0,PI,n_thetas)
p3d = np.repeat(np.array([p2d]),len(theta_arr),axis=0)
eval_at_t = lambda arr1d,t: np.dot(arr1d,np.dot(matrix(t),arr1d)) # optionally put a square root here
def eval_at_t_block_2d(idx,p2d,t):
vals[idx] = np.apply_along_axis(eval_at_t,1,p2d,t)
vals = np.zeros((len(theta_arr),n_samples))
for idx,(arr2d,t) in enumerate(zip(p3d,theta_arr)):
eval_at_t_block_2d(idx,arr2d,t)
vals = vals.flatten()**2
return vals
# First loss function suggested by Jon
def loss_eig(window):
eigs = quantization_sample_fourier_values(window,n_samples=256,n_thetas=50)
eigs[np.where(eigs>=1.0)] = 1.0 # we don't care about large eigenvalues
return 1 - np.mean(eigs / (0.1 + eigs)) # minimize this
# Second loss function suggested by Jon
def loss_width_height(window):
large_box = helper.window_pad_to_box_rfft(window,4.0)
lb = np.abs(large_box)
width = find_peaks(-lb[5:])[0][0] # assumes there is a peak
loss_width = np.abs(width) / 10 # 0 for SINC
loss_log_height = max(np.log(lb[width:]))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
import numpy as np
import astropy.units as u
from ...utils.testing import requires_data
from ...irf import EffectiveAreaTable, EnergyDispersion
from ..core import CountsSpectrum
from ..sensitivity import SensitivityEstimator
@pytest.fixture()
def sens():
etrue = np.logspace(0, 1, 21) * u.TeV
elo = etrue[:-1]
ehi = etrue[1:]
area = np.zeros(20) + 1e6 * u.m ** 2
arf = EffectiveAreaTable(energy_lo=elo, energy_hi=ehi, data=area)
ereco = np.logspace(0, 1, 5) * u.TeV
rmf = EnergyDispersion.from_diagonal_response(etrue, ereco)
bkg_array = np.ones(4)
#!python3
import numpy as np
##################
## Arrays
##################
# Create an array without numpy
a1 = [1, 2, 3, 4, 5]
print('List:\n{}\n'.format(a1))
# Create an array with numpy
a2 = np.array([1, 2, 3, 4, 5])
print('Array:\n{}\n'.format(a2))
# Access array elements
print('Access elements')
print('List: a1[1]\n{}\n'.format(a1[1]))
print('Array: a2[1]\n{}\n'.format(a2[1]))
# Create an array of all zeros
a = np.zeros((2,2))
print('Zeros:\n{}\n'.format(a))
# Create an array of all ones
b = np.ones((1,2))
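# assumed continuation mirroring the zeros example above (not in the source)
print('Ones:\n{}\n'.format(b))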
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Affiliation: TU Delft and Deltares, Delft, The Netherlands
Pre-processing for Gibbs sampler to:
1. Extract seasonal shape
2. Produce time shifts for the new scenarios
"""
#==============================================================================
#STEP 0 - Import data
import matplotlib.pyplot as plt
import numpy as np
from lmfit.models import LinearModel
from scipy.signal import argrelextrema
from scipy import stats
from scipy.stats import rankdata
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
import random
from scipy import interpolate
#==============================================================================
#Define functions
def seasonal_mean(x, freq):
"""
Return means for each period in x. freq is an int that gives the
number of periods per cycle. E.g., 12 for monthly. NaNs are ignored
in the mean.
"""
return np.array([np.nanmean(x[i::freq], axis=0) for i in range(freq)])
def seasonal_component(x, freq):
"""
Tiles seasonal means (periodic averages) into a time series as long as the original.
"""
nobs=len(x)
period_averages=seasonal_mean(x, freq)
period_averages -= np.mean(period_averages, axis=0)
return np.tile(period_averages.T, nobs // freq + 1).T[:nobs]
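# Illustrative check (synthetic series, not from the original source):
# >>> x = np.tile(np.arange(4.0), 3)
# >>> np.allclose(seasonal_component(x, 4), np.tile(np.arange(4.0) - 1.5, 3))
# True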
#==============================================================================
#Load and initialize data
data=np.load(r'tensor_daily_mean_5D.npy')
#Replace Nan with 0
data[np.where(np.isnan(data) == True)]=0
#Reshape
data=np.append(data[:,:,:,:,0],data[:,:,:,:,1],axis=3)
#Data view 3D
data_slice=data[:,3,0,:]
#CalendarYear
calendarYear=365.00
#==============================================================================
#Initialize empty vectors
#cosine_yearly_fitted=np.zeros((400, 90,np.shape(data)[3]))
x_data_ideal_matrix=np.zeros((420, 90,np.shape(data)[3]))
x_data_ideal_cont_matrix=np.zeros((420, 90,np.shape(data)[3]))
x_data_slice_matrix=np.zeros((420, 90,np.shape(data)[3]))
y_data_slice_matrix=np.zeros((420, 90,np.shape(data)[3]))
y_data_slice_smooth_matrix=np.zeros((420, 90,np.shape(data)[3]))
y_data_slice_smooth_365_nearest_matrix=np.zeros((420, 90,np.shape(data)[3]))
x_data_ideal_1D_matrix=np.zeros((np.shape(data)[0], np.shape(data)[3]))
y_data_365_nearest_matrix=np.zeros((np.shape(data)[0], np.shape(data)[3]))
deviation_matrix=np.zeros((90,np.shape(data)[3]))
line_intercept=np.zeros((1,np.shape(data)[3]))
line_slope=np.zeros((1,np.shape(data)[3]))
residual_pattern_sqr_matrix=np.zeros((int(calendarYear), np.shape(data)[3]))
#==============================================================================
#Zero to Nan
x_data_slice_matrix[x_data_slice_matrix == 0] = np.nan
x_data_ideal_matrix[x_data_ideal_matrix == 0] = np.nan
x_data_ideal_cont_matrix[x_data_ideal_cont_matrix == 0] = np.nan
y_data_slice_matrix[y_data_slice_matrix == 0] = np.nan
y_data_slice_smooth_matrix[y_data_slice_smooth_matrix == 0] = np.nan
y_data_slice_smooth_365_nearest_matrix[y_data_slice_smooth_365_nearest_matrix == 0] = np.nan
x_data_ideal_1D_matrix[x_data_ideal_1D_matrix == 0] = np.nan
y_data_365_nearest_matrix[y_data_365_nearest_matrix == 0] = np.nan
residual_pattern_sqr_matrix[residual_pattern_sqr_matrix == 0] = np.nan
#==============================================================================
#Choose time interval by the number of timesteps
datalimit1=0
datalimit2=32872
#Initialize empty matrices
y_data_detrended_matrix=np.zeros((datalimit2, np.shape(data)[3]))
trend=np.zeros((datalimit2,np.shape(data)[3]))
residual=np.zeros((datalimit2,np.shape(data)[3]))
#Plot param
plt.rcParams['figure.figsize'] = (10,5)
#Initialize x and y
x_data=np.arange(0,datalimit2)/calendarYear
y_data_all=data[datalimit1:datalimit2,0,0,:]
#Choose scenarios
scenarios= [0,1,2,4,5,6,7,9] #range(np.shape(data)[3])
for i in scenarios:
y_data=data[datalimit1:datalimit2,0,0,i]
#==============================================================================
#STEP0 - Identify, trend, seasonality, residual
result = seasonal_decompose(y_data, freq=365, model='additive')
#Fit lineat trend
#Get parameters: alpha and beta
#Fit curve with lmfit
line_mod=LinearModel(prefix='line_')
pars_line = line_mod.guess(y_data, x=x_data)
result_line_model=line_mod.fit(y_data, pars_line, x=x_data)
print(result_line_model.fit_report())
line_intercept[:,i]=result_line_model.params['line_intercept']._val
line_slope[:,i]=result_line_model.params['line_slope']._val
trend[:,i]=result_line_model.best_fit
#==============================================================================
#STEP 2
#Remove trend
y_data_detrended=y_data-result_line_model.best_fit
y_data_detrended_matrix[:,i]=y_data_detrended
y_data_detrend=seasonal_component(y_data_detrended, int(calendarYear)) #seasonal component
#==============================================================================
#Smooth LOWESS
lowess = sm.nonparametric.lowess
data_smooth_lowess=lowess(y_data_detrended, x_data, frac=1./500, it=0, delta = 0.01, is_sorted=True, missing='drop', return_sorted=False)
# for local minima
#add reflective boundary
#data_smooth_reflective = np.pad(data_smooth, 0, mode='reflect')
local_min_location=np.array(argrelextrema(data_smooth_lowess, np.less, order=300, mode='wrap'))
#local_min_location = (np.insert(local_min_location, 90,(len(x_data)-1))).reshape((1,-1))
local_min=data_smooth_lowess[local_min_location]
#distance between minima
dist_minima=np.transpose(np.diff(local_min_location))
#Plot deviations from calendar year (histogram)
deviation=(calendarYear-dist_minima)/calendarYear
deviation_matrix[:len(deviation),i]=deviation.flatten(order='F')
#==============================================================================
#STEP 3
#Chop years and Fit cosine curve
#Get parameters: amplitude
for j in range(np.shape(local_min_location)[1]-1):
y_data_slice=y_data_detrended[int(local_min_location[:,j]):int(local_min_location[:,j+1])]
x_data_slice=np.arange(0,len(y_data_slice))/calendarYear
x_data_slice_365=np.arange(0,365)/calendarYear
#Remove time change from data
x_data_ideal=np.linspace(0,calendarYear,len(y_data_slice))/calendarYear
if j==0:
x_data_ideal_cont=np.linspace((j*calendarYear),(j+1)*calendarYear,len(y_data_slice))/calendarYear
else:
x_data_ideal_cont=np.linspace((j*calendarYear)+1,(j+1)*calendarYear,len(y_data_slice))/calendarYear
y_data_slice_smooth=lowess(y_data_slice, x_data_ideal, frac=1./10, it=0, is_sorted=True, missing='drop', return_sorted=False)
#Interpolate to regular step - smooth data
f = interpolate.interp1d(x_data_ideal, y_data_slice_smooth,kind='nearest', fill_value="extrapolate")
y_data_slice_smooth_365_nearest= f(x_data_slice_365) # use interpolation function returned by `interp1d`
x_data_slice_matrix[:len(x_data[int(local_min_location[:,j]):int(local_min_location[:,j+1])]),j,i]=x_data_slice
x_data_ideal_matrix[:len(x_data[int(local_min_location[:,j]):int(local_min_location[:,j+1])]),j,i]=x_data_ideal
x_data_ideal_cont_matrix[:len(x_data[int(local_min_location[:,j]):int(local_min_location[:,j+1])]),j,i]=x_data_ideal_cont
y_data_slice_matrix[:len(y_data_detrend[int(local_min_location[:,j]):int(local_min_location[:,j+1])]),j,i]=y_data_slice
y_data_slice_smooth_matrix[:len(y_data_detrend[int(local_min_location[:,j]):int(local_min_location[:,j+1])]),j,i]=y_data_slice_smooth
y_data_slice_smooth_365_nearest_matrix[:len(y_data_slice_smooth_365_nearest),j,i]=y_data_slice_smooth_365_nearest
#Plot fitted all sin curve (lmfit)
plt.figure(figsize=(12, 5))
labels=['Data points']
plt.plot(x_data_ideal_matrix[:,:,i], y_data_slice_matrix[:,:,i], 'bo', alpha = 0.1)
plt.plot(x_data_ideal_matrix[:,:,i], y_data_slice_smooth_matrix[:,:,i], 'k-')
plt.xlabel('Number of years')
plt.ylabel('Radiation')
plt.legend(labels)
plt.title("Scenario: " + str(i))
plt.show()
#x_data_ideal_1D
x_data_ideal_1D = x_data_ideal_cont_matrix[:,:,i].flatten(order='F')
x_data_ideal_1D = x_data_ideal_1D[~np.isnan(x_data_ideal_1D)]
#Interpolate to regular step
f = interpolate.interp1d(x_data_ideal_1D, y_data[:len(x_data_ideal_1D)],kind='nearest', fill_value="extrapolate")
y_data_365_nearest= f(x_data[:len(x_data_ideal_1D)]) # use interpolation function returned by `interp1d`
#Save in matrix
x_data_ideal_1D_matrix[:len(x_data_ideal_1D),i]=x_data_ideal_1D
y_data_365_nearest_matrix[:len(y_data_365_nearest),i]=y_data_365_nearest
print(i)
#==============================================================================
#==============================================================================
#Flatten arrays with all years into 1D vector
x_data_ideal_flattened=np.zeros((np.shape(x_data_ideal_matrix)[0], np.shape(x_data_ideal_matrix)[1]*np.shape(x_data_ideal_matrix)[2]))
y_data_slice_flattened=np.zeros((np.shape(y_data_slice_matrix)[0], np.shape(y_data_slice_matrix)[1]*np.shape(y_data_slice_matrix)[2]))
y_data_slice_smooth_flattened=np.zeros((np.shape(y_data_slice_smooth_matrix)[0], np.shape(y_data_slice_smooth_matrix)[1]*np.shape(y_data_slice_smooth_matrix)[2]))
y_data_slice_smooth_365_nearest_flattened=np.zeros((np.shape(y_data_slice_smooth_matrix)[0], np.shape(y_data_slice_smooth_matrix)[1]*np.shape(y_data_slice_smooth_matrix)[2]))
for k in range(np.shape(y_data_slice_matrix)[0]):
x_data_ideal_flattened[k,:]=x_data_ideal_matrix[k,:,:].flatten(order='F')
y_data_slice_flattened[k,:]=y_data_slice_matrix[k,:,:].flatten(order='F')
y_data_slice_smooth_flattened[k,:]=y_data_slice_smooth_matrix[k,:,:].flatten(order='F')
y_data_slice_smooth_365_nearest_flattened[k,:]=y_data_slice_smooth_365_nearest_matrix[k,:,:].flatten(order='F')
#Average smooth curve
av_loess_scenario=np.nanmean(y_data_slice_smooth_365_nearest_matrix, axis=1)
av_loess_scenario=av_loess_scenario[:365,:]
#Save av_loess_scenario
np.save(r'av_loess_scenario.npy', av_loess_scenario)
#Average smoothed
av_loess=np.nanmean(y_data_slice_smooth_365_nearest_flattened, axis=1)
av_loess=av_loess[~np.isnan(av_loess)]
#==============================================================================
#All parameters
deviation_flattened=deviation_matrix.flatten(order='F')
line_intercept_flattened=line_intercept.flatten(order='F')
line_slope_flattened=line_slope.flatten(order='F')
#Remove Nans
deviation_flattened= deviation_flattened[~np.isnan(deviation_flattened)]
#Remove zeros
#amplitude_loess_flattened= amplitude_loess_flattened[np.nonzero(amplitude_loess_flattened)]
deviation_flattened= deviation_flattened[np.nonzero(deviation_flattened)]
line_intercept_flattened=line_intercept_flattened[np.nonzero(line_intercept_flattened)]
line_slope_flattened=line_slope_flattened[np.nonzero(line_slope_flattened)]
#Analyse parameter change
#Parameter statistics
#deviation
deviation_mean=np.mean(deviation_flattened)
deviation_std=np.std(deviation_flattened)
print('deviation - mean:',np.round(deviation_mean,decimals=4), 'std:',np.round(deviation_std, decimals=4))
#Deviation per scenario
deviation_mean_sc=np.mean(deviation_matrix, axis=0)
deviation_std_sc=np.std(deviation_matrix, axis=0)
#Remove zeros
deviation_mean_sc= deviation_mean_sc[np.nonzero(deviation_mean_sc)]
deviation_std_sc= deviation_std_sc[np.nonzero(deviation_std_sc)]
#Line intercept
line_intercept_mean=np.mean(line_intercept_flattened)
line_intercept_std=np.std(line_intercept_flattened)
print('line_intercept - mean:',np.round(line_intercept_mean,decimals=2), 'std:',np.round(line_intercept_std, decimals=2))
#Line slope
line_slope_mean=np.mean(line_slope_flattened)
line_slope_std=np.std(line_slope_flattened)
print('line_slope - mean:',np.round(line_slope_mean,decimals=2), 'std:',np.round(line_slope_std, decimals=2))
#==============================================================================
#Combine LOESS curves
new_loess=[]
def new_loess_func():
for l in np.arange(85):
if l==0:
new_loess = lowess(av_loess[:365], np.squeeze(np.linspace(0, dist_minima[l], num=len(av_loess[:365]))/calendarYear), frac=1./10, it=0, is_sorted=True, missing='drop', return_sorted=False)
else:
new_loess = np.append(new_loess, (lowess(av_loess[:365], np.squeeze(np.linspace(0, dist_minima[l], num=len(av_loess[:365]))/calendarYear), frac=1./10, it=0, is_sorted=True, missing='drop', return_sorted=False)), axis=0)
return new_loess
long_loess=new_loess_func()
#Mean y_data_detrended
y_data_detrended_mean=np.mean(y_data_detrended_matrix, axis=1)
#residuals
resdiuals_longloess=y_data_detrended_mean[:len(long_loess)]-long_loess[:]
#==============================================================================
#Add trend and model residual
for r in scenarios:
model_trend_loess=long_loess+trend[:len(long_loess),r]
#residuals
#residuals_modeled=y_data[:len(long_cosine)]-model_trend_residual
residuals_modeled_loess=y_data_365_nearest_matrix[:len(long_loess),r]-model_trend_loess
std_residuals_modeled_loess=np.nanstd(residuals_modeled_loess)
#Take variance of residuals
residuals_modeled_loess_var=np.square(residuals_modeled_loess)
residuals_modeled_loess_var_av=seasonal_mean(residuals_modeled_loess_var, 365)
#Pattern in average residual shape
residual_pattern=lowess(residuals_modeled_loess_var_av, np.arange(len(residuals_modeled_loess_var_av)), frac=1./10, it=0, is_sorted=True, missing='drop', return_sorted=False)
residual_pattern_sqr=np.sqrt(residual_pattern)
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=too-many-return-statements,unpacking-non-sequence
"""
Transformations between QuantumChannel representations.
"""
import numpy as np
import scipy.linalg as la
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_hermitian_matrix
from qiskit.quantum_info.operators.predicates import ATOL_DEFAULT
def _transform_rep(input_rep, output_rep, data, input_dim, output_dim):
"""Transform a QuantumChannel between representation."""
if input_rep == output_rep:
return data
if output_rep == 'Choi':
return _to_choi(input_rep, data, input_dim, output_dim)
if output_rep == 'Operator':
return _to_operator(input_rep, data, input_dim, output_dim)
if output_rep == 'SuperOp':
return _to_superop(input_rep, data, input_dim, output_dim)
if output_rep == 'Kraus':
return _to_kraus(input_rep, data, input_dim, output_dim)
if output_rep == 'Chi':
return _to_chi(input_rep, data, input_dim, output_dim)
if output_rep == 'PTM':
return _to_ptm(input_rep, data, input_dim, output_dim)
if output_rep == 'Stinespring':
return _to_stinespring(input_rep, data, input_dim, output_dim)
raise QiskitError('Invalid QuantumChannel {}'.format(output_rep))
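# Illustrative round-trip (a sketch, not part of the original module): for a
# unitary U the Choi matrix is |vec(U)><vec(U)| with column-major vec.
# >>> U = np.array([[0, 1], [1, 0]], dtype=complex)  # Pauli-X
# >>> choi = _transform_rep('Operator', 'Choi', U, 2, 2)
# >>> vec = U.ravel(order='F')
# >>> np.allclose(choi, np.outer(vec, vec.conj()))
# True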
def _to_choi(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Choi representation."""
if rep == 'Choi':
return data
if rep == 'Operator':
return _from_operator('Choi', data, input_dim, output_dim)
if rep == 'SuperOp':
return _superop_to_choi(data, input_dim, output_dim)
if rep == 'Kraus':
return _kraus_to_choi(data)
if rep == 'Chi':
return _chi_to_choi(data, input_dim)
if rep == 'PTM':
data = _ptm_to_superop(data, input_dim)
return _superop_to_choi(data, input_dim, output_dim)
if rep == 'Stinespring':
return _stinespring_to_choi(data, input_dim, output_dim)
raise QiskitError('Invalid QuantumChannel {}'.format(rep))
def _to_superop(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the SuperOp representation."""
if rep == 'SuperOp':
return data
if rep == 'Operator':
return _from_operator('SuperOp', data, input_dim, output_dim)
if rep == 'Choi':
return _choi_to_superop(data, input_dim, output_dim)
if rep == 'Kraus':
return _kraus_to_superop(data)
if rep == 'Chi':
data = _chi_to_choi(data, input_dim)
return _choi_to_superop(data, input_dim, output_dim)
if rep == 'PTM':
return _ptm_to_superop(data, input_dim)
if rep == 'Stinespring':
return _stinespring_to_superop(data, input_dim, output_dim)
raise QiskitError('Invalid QuantumChannel {}'.format(rep))
def _to_kraus(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Kraus representation."""
if rep == 'Kraus':
return data
if rep == 'Stinespring':
return _stinespring_to_kraus(data, output_dim)
if rep == 'Operator':
return _from_operator('Kraus', data, input_dim, output_dim)
# Convert via Choi and Kraus
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim)
return _choi_to_kraus(data, input_dim, output_dim)
def _to_chi(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Chi representation."""
if rep == 'Chi':
return data
# Check valid n-qubit input
_check_nqubit_dim(input_dim, output_dim)
if rep == 'Operator':
return _from_operator('Chi', data, input_dim, output_dim)
# Convert via Choi representation
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim)
return _choi_to_chi(data, input_dim)
def _to_ptm(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the PTM representation."""
if rep == 'PTM':
return data
# Check valid n-qubit input
_check_nqubit_dim(input_dim, output_dim)
if rep == 'Operator':
return _from_operator('PTM', data, input_dim, output_dim)
# Convert via Superoperator representation
if rep != 'SuperOp':
data = _to_superop(rep, data, input_dim, output_dim)
return _superop_to_ptm(data, input_dim)
def _to_stinespring(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Stinespring representation."""
if rep == 'Stinespring':
return data
if rep == 'Operator':
return _from_operator('Stinespring', data, input_dim, output_dim)
# Convert via Superoperator representation
if rep != 'Kraus':
data = _to_kraus(rep, data, input_dim, output_dim)
return _kraus_to_stinespring(data, input_dim, output_dim)
def _to_operator(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Operator representation."""
if rep == 'Operator':
return data
if rep == 'Stinespring':
return _stinespring_to_operator(data, output_dim)
# Convert via Kraus representation
if rep != 'Kraus':
data = _to_kraus(rep, data, input_dim, output_dim)
return _kraus_to_operator(data)
def _from_operator(rep, data, input_dim, output_dim):
"""Transform Operator representation to other representation."""
if rep == 'Operator':
return data
if rep == 'SuperOp':
return np.kron(np.conj(data), data)
if rep == 'Choi':
vec = np.ravel(data, order='F')
return np.outer(vec, np.conj(vec))
if rep == 'Kraus':
return [data], None
if rep == 'Stinespring':
return data, None
if rep == 'Chi':
_check_nqubit_dim(input_dim, output_dim)
data = _from_operator('Choi', data, input_dim, output_dim)
return _choi_to_chi(data, input_dim)
if rep == 'PTM':
_check_nqubit_dim(input_dim, output_dim)
data = _from_operator('SuperOp', data, input_dim, output_dim)
return _superop_to_ptm(data, input_dim)
raise QiskitError('Invalid QuantumChannel {}'.format(rep))
def _kraus_to_operator(data):
"""Transform Kraus representation to Operator representation."""
if data[1] is not None or len(data[0]) > 1:
raise QiskitError(
'Channel cannot be converted to Operator representation')
return data[0][0]
def _stinespring_to_operator(data, output_dim):
"""Transform Stinespring representation to Operator representation."""
trace_dim = data[0].shape[0] // output_dim
if data[1] is not None or trace_dim != 1:
raise QiskitError(
'Channel cannot be converted to Operator representation')
return data[0]
def _superop_to_choi(data, input_dim, output_dim):
"""Transform SuperOp representation to Choi representation."""
shape = (output_dim, output_dim, input_dim, input_dim)
return _reshuffle(data, shape)
def _choi_to_superop(data, input_dim, output_dim):
"""Transform Choi to SuperOp representation."""
shape = (input_dim, output_dim, input_dim, output_dim)
return _reshuffle(data, shape)
def _kraus_to_choi(data):
"""Transform Kraus representation to Choi representation."""
choi = 0
kraus_l, kraus_r = data
if kraus_r is None:
for i in kraus_l:
vec = i.ravel(order='F')
choi += np.outer(vec, vec.conj())
else:
for i, j in zip(kraus_l, kraus_r):
choi += np.outer(i.ravel(order='F'), j.ravel(order='F').conj())
return choi
def _choi_to_kraus(data, input_dim, output_dim, atol=ATOL_DEFAULT):
"""Transform Choi representation to Kraus representation."""
# Check if hermitian matrix
if is_hermitian_matrix(data, atol=atol):
# Get eigen-decomposition of Choi-matrix
# This should be a call to la.eigh, but there is an OpenBlas
# threading issue that is causing segfaults.
# Need schur here since la.eig does not
# guarantee orthogonality in degenerate subspaces
w, v = la.schur(data, output='complex')
w = w.diagonal().real
# Check eigenvalues are non-negative
if len(w[w < -atol]) == 0:
# CP-map Kraus representation
kraus = []
for val, vec in zip(w, v.T):
if abs(val) > atol:
k = np.sqrt(val) * vec.reshape(
(output_dim, input_dim), order='F')
kraus.append(k)
# If we are converting a zero matrix, we need to return a Kraus set
# with a single zero-element Kraus matrix
if not kraus:
kraus.append(np.zeros((output_dim, input_dim), dtype=complex))
return kraus, None
# Non-CP-map generalized Kraus representation
mat_u, svals, mat_vh = la.svd(data)
kraus_l = []
kraus_r = []
for val, vec_l, vec_r in zip(svals, mat_u.T, mat_vh.conj()):
kraus_l.append(
np.sqrt(val) * vec_l.reshape((output_dim, input_dim), order='F'))
kraus_r.append(
np.sqrt(val) * vec_r.reshape((output_dim, input_dim), order='F'))
return kraus_l, kraus_r
def _stinespring_to_kraus(data, output_dim):
"""Transform Stinespring representation to Kraus representation."""
kraus_pair = []
for stine in data:
if stine is None:
kraus_pair.append(None)
else:
trace_dim = stine.shape[0] // output_dim
iden = np.eye(output_dim)
kraus = []
for j in range(trace_dim):
vec = np.zeros(trace_dim)
vec[j] = 1
kraus.append(np.kron(iden, vec[None, :]).dot(stine))
kraus_pair.append(kraus)
return tuple(kraus_pair)
def _stinespring_to_choi(data, input_dim, output_dim):
"""Transform Stinespring representation to Choi representation."""
trace_dim = data[0].shape[0] // output_dim
stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim))
if data[1] is None:
stine_r = stine_l
else:
stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim))
return np.reshape(
np.einsum('iAj,kAl->jilk', stine_l, stine_r.conj()),
2 * [input_dim * output_dim])
def _stinespring_to_superop(data, input_dim, output_dim):
"""Transform Stinespring representation to SuperOp representation."""
trace_dim = data[0].shape[0] // output_dim
stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim))
if data[1] is None:
stine_r = stine_l
else:
stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim))
return np.reshape(
np.einsum('iAj,kAl->ikjl', stine_r.conj(), stine_l),
(output_dim * output_dim, input_dim * input_dim))
def _kraus_to_stinespring(data, input_dim, output_dim):
"""Transform Kraus representation to Stinespring representation."""
stine_pair = [None, None]
for i, kraus in enumerate(data):
if kraus is not None:
num_kraus = len(kraus)
stine = np.zeros((output_dim * num_kraus, input_dim),
dtype=complex)
for j, mat in enumerate(kraus):
vec = np.zeros(num_kraus)
vec[j] = 1
stine += np.kron(mat, vec[:, None])
stine_pair[i] = stine
return tuple(stine_pair)
def _kraus_to_superop(data):
"""Transform Kraus representation to SuperOp representation."""
kraus_l, kraus_r = data
superop = 0
if kraus_r is None:
for i in kraus_l:
superop += np.kron(np.conj(i), i)
else:
for i, j in zip(kraus_l, kraus_r):
superop += np.kron(np.conj(j), i)
return superop
def _chi_to_choi(data, input_dim):
"""Transform Chi representation to a Choi representation."""
num_qubits = int(np.log2(input_dim))
return _transform_from_pauli(data, num_qubits)
def _choi_to_chi(data, input_dim):
"""Transform Choi representation to the Chi representation."""
num_qubits = int(np.log2(input_dim))
return _transform_to_pauli(data, num_qubits)
def _ptm_to_superop(data, input_dim):
"""Transform PTM representation to SuperOp representation."""
num_qubits = int(np.log2(input_dim))
return _transform_from_pauli(data, num_qubits)
def _superop_to_ptm(data, input_dim):
"""Transform SuperOp representation to PTM representation."""
num_qubits = int(np.log2(input_dim))
return _transform_to_pauli(data, num_qubits)
def _bipartite_tensor(mat1, mat2, shape1=None, shape2=None):
"""Tensor product (A ⊗ B) to bipartite matrices and reravel indices.
This is used for tensor product of superoperators and Choi matrices.
Args:
mat1 (matrix_like): a bipartite matrix A
mat2 (matrix_like): a bipartite matrix B
shape1 (tuple): bipartite-shape for matrix A (a0, a1, a2, a3)
shape2 (tuple): bipartite-shape for matrix B (b0, b1, b2, b3)
Returns:
np.array: a bipartite matrix for reravel(A ⊗ B).
Raises:
QiskitError: if input matrices are wrong shape.
"""
# Convert inputs to numpy arrays
mat1 = np.array(mat1)
mat2 = np.array(mat2)
# Determine bipartite dimensions if not provided
dim_a0, dim_a1 = mat1.shape
dim_b0, dim_b1 = mat2.shape
if shape1 is None:
sdim_a0 = int(np.sqrt(dim_a0))
sdim_a1 = int(np.sqrt(dim_a1))
shape1 = (sdim_a0, sdim_a0, sdim_a1, sdim_a1)
if shape2 is None:
sdim_b0 = int(np.sqrt(dim_b0))
sdim_b1 = int(np.sqrt(dim_b1))
shape2 = (sdim_b0, sdim_b0, sdim_b1, sdim_b1)
# Check dimensions
if len(shape1) != 4 or shape1[0] * shape1[1] != dim_a0 or \
shape1[2] * shape1[3] != dim_a1:
raise QiskitError("Invalid shape_a")
if len(shape2) != 4 or shape2[0] * shape2[1] != dim_b0 or \
shape2[2] * shape2[3] != dim_b1:
raise QiskitError("Invalid shape_b")
return _reravel(mat1, mat2, shape1, shape2)
def _reravel(mat1, mat2, shape1, shape2):
"""Reravel two bipartite matrices."""
# Reshuffle indices
left_dims = shape1[:2] + shape2[:2]
right_dims = shape1[2:] + shape2[2:]
tensor_shape = left_dims + right_dims
    final_shape = (np.product(left_dims), np.product(right_dims))
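    # Hedged completion (the excerpt is truncated here): take the Kronecker
    # product, then permute the bipartite subsystem indices back into
    # (a0, b0, a1, b1) x (a2, b2, a3, b3) ordering. A sketch of the standard
    # reravel step, not necessarily the verbatim original.
    data = np.reshape(
        np.transpose(np.reshape(np.kron(mat1, mat2), tensor_shape),
                     (0, 2, 1, 3, 4, 6, 5, 7)),
        final_shape)
    return data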
from functools import reduce
import importlib
import numpy as np
import torch
from utils.utils import one_hot_encoding
__all__ = ('Compose', 'ToTensor', 'LabelSelection', 'ChannelSelection')
class Compose:
def __init__(self, transforms):
if transforms.__class__.__name__ not in ['AttrDict', 'dict']:
raise TypeError(f"Not supported {transforms.__class__.__name__} type yet.")
transforms_module = importlib.import_module('data_loader.transforms')
configure_transform = lambda transform, params: getattr(transforms_module, transform)(**params) \
if type(params).__name__ in ['dict', 'AttrDict'] else getattr(transforms_module, transform)(params)
self.transforms = [configure_transform(transform, params) for transform, params in transforms.items()]
def __call__(self, dataset):
for transform in self.transforms:
transform(dataset)
def __repr__(self):
format_string = self.__class__.__name__ + '(\n'
for transform in self.transforms:
format_string += f' {transform.__class__.__name__}()\n'
format_string += ')'
return format_string
class ToTensor:
def __init__(self, *args, **kwargs):
pass
def __call__(self, dataset):
if type(dataset.x) != torch.Tensor:
dataset.x = torch.as_tensor(dataset.x, dtype=torch.float)
if type(dataset.y) != torch.Tensor:
dataset.y = torch.as_tensor(dataset.y, dtype=torch.long)
def __repr__(self):
return self.__class__.__name__
class LabelSelection:
def __init__(self, target_labels, is_one_hot=False):
self.target_labels = target_labels
self.is_one_hot = is_one_hot
def __call__(self, dataset):
        # `is_one_hot` doubles as a mode string: 'one_hot_encoding' marks
        # labels that arrive one-hot encoded and must be re-encoded below.
        if self.is_one_hot == 'one_hot_encoding':
dataset.y = np.argmax(dataset.y, axis=1)
# Select labels
labels = ((dataset.y == label) for label in self.target_labels)
idx = reduce(lambda x, y: x | y, labels)
dataset.x = dataset.x[idx]
dataset.y = dataset.y[idx]
# Mapping labels
for mapping, label in enumerate(np.unique(dataset.y)):
dataset.y[dataset.y == label] = mapping
if self.is_one_hot == 'one_hot_encoding':
dataset.y = one_hot_encoding(dataset.y)
def __repr__(self):
return self.__class__.__name__
class ChannelSelection:
def __init__(self, target_chans):
self.target_chans = target_chans
def __call__(self, dataset):
        # indices of the requested channels within the dataset's channel list
        target_idx = [i for i, chan in enumerate(dataset.ch_names) if chan in self.target_chans]
dataset.x = dataset.x[..., target_idx, :]
def __repr__(self):
return self.__class__.__name__
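# Hedged usage sketch (the config shape is an assumption inferred from how
# Compose parses its argument, not an excerpt from the original repo):
# transforms are declared as a dict of class names in this module mapped to
# constructor kwargs, and are applied in order, mutating the dataset in place.
def _example_compose_usage(dataset):
    pipeline = Compose({
        'ChannelSelection': {'target_chans': ['Cz', 'C3', 'C4']},
        'ToTensor': {},
    })
    pipeline(dataset)  # dataset.x / dataset.y are modified in place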
class TimeSegmentation:
def __init__(self, window_size, step_size, axis=1, merge='stack'):
self.window_size = window_size
self.step_size = step_size
self.axis = axis
self.merge = merge
def __call__(self, dataset):
segments = []
times = np.arange(dataset.x.shape[-1])
start = times[::self.step_size]
end = start + self.window_size
for s, e in zip(start, end):
if e > len(times):
break
segments.append(dataset.x[..., s:e])
if self.merge == 'stack':
dataset.x = np.stack(segments, axis=self.axis)
elif self.merge == 'concat':
            dataset.x = np.concatenate(segments, axis=self.axis)
import numpy as np
from uniswap_simulator import Position
def coerce_to_tick_spacing(spacing, ticks):
ticks = ticks.copy()
ticks[...,0] -= np.mod(ticks[...,0], spacing)
ticks[...,1] -= np.mod(ticks[...,1], spacing) - spacing
return ticks
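# Hedged illustration (uncalled, for documentation only): ranges widen
# outward to valid ticks, e.g. with spacing 60 the pair (-75, 75) becomes
# (-120, 120) -- the lower tick is floored, the upper tick is ceiled.
def _example_coerce_to_tick_spacing():
    out = coerce_to_tick_spacing(60, np.array([[-75.0, 75.0]]))
    assert np.array_equal(out, np.array([[-120.0, 120.0]]))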
class DRDP0Strategy:
limit_order_width = 10
epsilon = 0.001
def __init__(self, price, lower, upper, fee):
self._tick_spacing = 10
if fee == 0.3 / 100:
self._tick_spacing = 60
elif fee == 1.0 / 100:
self._tick_spacing = 100
self.position = Position(price, lower, upper, fee)
self.limit_order = Position(price, lower, price / 1.0001, fee)
def reset(self, price):
self.position.reset(price)
self.limit_order = Position(price, self.position.lower, price / 1.0001, self.position.fee)
def mint(self, amount0, amount1):
return self.position.mint(amount0, amount1)
def update(self, price):
amounts = self.position.update(price)
amounts += self.limit_order.update(price)
self._compound(price, amounts.copy())
return amounts
def _compound(self, price, amounts, fraction=0.99):
if self.position._earned is None:
return
inactive_limit_orders = np.any((
price < self.limit_order.lower,
price > self.limit_order.upper
), axis=0)
earned = self.position._earned.copy()
earned += self.limit_order.burn()
# change basis of `amounts[...,0]` so that it represents wealth in each asset
amounts[...,0] *= price
excess0 = amounts[...,0] > amounts[...,1]
# compute active trading range (defined by lower and upper ticks)
active_ticks = np.log(price) / np.log(1.0001)
active_ticks = coerce_to_tick_spacing(
self._tick_spacing,
            np.vstack((active_ticks, active_ticks)))
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from datasets import load_dataset
from sklearn.svm import LinearSVC
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
import torch
from torch import nn
from torch.nn import functional as func
from torch.optim import AdamW
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from os import path, makedirs
import nets
import matplotlib.pyplot as plt
#import hiddenlayer as hl
#import graphviz
# def build_cnn(config):
# if config == 1:
def gpu_to_cpu(loc, cls):
    # `cls` only documents the expected model class; torch.load reconstructs
    # the full pickled model directly on the CPU.
    model = torch.load(loc, map_location=torch.device('cpu'))
torch.save(model, loc)
def graph(loss_vals, model_name, dataset_name):
epochs = [x[0] for x in loss_vals]
loss = [x[1] for x in loss_vals]
val_loss = [x[2] for x in loss_vals]
plt.plot(epochs, loss, label='Training Loss')
plt.plot(epochs, val_loss, label='Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.title('Training info for ' + model_name +' on ' + dataset_name)
plt.legend()
plt.savefig('graphs/' + model_name + 'loss')
plt.clf()
def train_svm(X_tr, y_tr, X_te, y_te):
svc = LinearSVC(verbose=True)
svc.fit(X_tr, y_tr)
svm_predictions = svc.predict(X_te)
print('SVM_tfidf_acc = {}'.format(accuracy_score(y_te, svm_predictions)))
print('SVM_tfidf confusion matrix:')
print(confusion_matrix(y_te, svm_predictions))
def train_model(model, opt, train_load, val_load, test_load, epochs, model_name=""):
print('Starting training')
epoch_losses = []
best_val_loss = 1000
for epoch in range(epochs):
model.train()
losses = []
lossf = nn.CrossEntropyLoss()
for _, batch in enumerate(train_load):
embed_gpu, lab_gpu = tuple(i.to(device) for i in batch)
opt.zero_grad()
logits = model(embed_gpu)
            loss = lossf(logits, lab_gpu)
            # normalize by the number of samples; len(batch) would always be 2
            # (the embeddings tensor and the labels tensor)
            losses.append(loss.item() / embed_gpu.size(0))
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 1)
opt.step()
epoch_loss = np.mean(losses)
print('Training Loss: {}'.format(epoch_loss))
val_loss = validate_model(model, val_load, 'Validation')
epoch_losses.append((epoch, epoch_loss, val_loss))
graph(epoch_losses, model_name, 'amazon_us_reviews')
        if val_loss < best_val_loss:
            best_val_loss = val_loss  # track the best validation loss seen so far
            torch.save(model, 'checkpoint.pth')
validate_model(model, test_load, 'Test')
torch.save(model, 'models/' + model_name + '.pth')
gpu_to_cpu('checkpoint.pth', nets.Net)
gpu_to_cpu('models/' + model_name + '.pth', nets.Net)
return epoch_losses
def validate_model(model, loader, set_name):
model.eval()
acc = []
losses = []
for batch in loader:
gpu_embed, gpu_lab = tuple(i.to(device) for i in batch)
with torch.no_grad():
logits = model(gpu_embed)
lossf = nn.CrossEntropyLoss()
            loss = lossf(logits, gpu_lab)
            losses.append(loss.item() / gpu_embed.size(0))  # per-sample loss
_, predictions = torch.max(logits, dim=1)
accuracy = (predictions == gpu_lab).cpu().numpy().mean()
acc.append(accuracy)
print('{} loss: {}'.format(set_name, np.mean(losses)))
print('{} acc: {}'.format(set_name, np.mean(acc)))
return np.mean(losses)
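# Hedged wiring sketch (names here are illustrative assumptions, not from the
# original script): train_model expects loaders that yield (embedding, label)
# tensor pairs, which a TensorDataset provides directly.
def _example_build_loader(embeddings, labels, batch_size=32, shuffle=True):
    ds = TensorDataset(torch.FloatTensor(embeddings), torch.LongTensor(labels))
    sampler = RandomSampler(ds) if shuffle else SequentialSampler(ds)
    return DataLoader(ds, sampler=sampler, batch_size=batch_size)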
# device = 'cuda'
device = 'cpu'
#torch.autograd.set_detect_anomaly(True)
DATASET_NAME = 'amazon_us_reviews'
SUBSET = 'Digital_Software_v1_00'
if not path.exists('test/embeddings_tensor.pth'):
distilbert = SentenceTransformer('stsb-distilbert-base', device='cpu')
dataset = load_dataset(DATASET_NAME, SUBSET)
data = dataset['train']['review_body']
labels_sav = dataset['train']['star_rating']
amounts = []
for cl in np.unique(labels_sav):
amounts.append((cl, labels_sav.count(cl)))
print(amounts)
train_data, test_data, train_labels, test_labels = train_test_split(data, labels_sav, stratify=labels_sav, test_size=0.2)
train_bin_labels = list(map(lambda x: 1 if x > 3 else 0, train_labels))
test_bin_labels = list(map(lambda x: 1 if x > 3 else 0, test_labels))
train_distilbert_embed = distilbert.encode(train_data, show_progress_bar=False)
test_distilbert_embed = distilbert.encode(test_data, show_progress_bar=False)
makedirs('train', exist_ok=True)
makedirs('test', exist_ok=True)
np.save('train/text.npy', train_data, allow_pickle=True)
np.save('train/embeddings.npy', train_distilbert_embed, allow_pickle=True)
np.save('train/bin_labels.npy', train_bin_labels, allow_pickle=True)
np.save('train/labels.npy', train_labels, allow_pickle=True)
np.save('test/text.npy', test_data, allow_pickle=True)
np.save('test/embeddings.npy', test_distilbert_embed, allow_pickle=True)
np.save('test/bin_labels.npy', test_bin_labels, allow_pickle=True)
np.save('test/labels.npy', test_labels, allow_pickle=True)
torch.save(torch.FloatTensor(test_bin_labels), 'test/bin_labels_tensor.pth')
torch.save(torch.FloatTensor(test_distilbert_embed), 'test/embeddings_tensor.pth')
train_embeddings = np.load('train/embeddings.npy', allow_pickle=True)
train_labels = np.load('train/bin_labels.npy', allow_pickle=True)
test_embeddings = np.load('test/embeddings.npy', allow_pickle=True)
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 18 11:47:18 2016
@author: sebalander
"""
# %% IMPORTS
from matplotlib.pyplot import plot, imshow, legend, show, figure, gcf, imread
from matplotlib.pyplot import xlabel, ylabel
from cv2 import Rodrigues # , homogr2pose
from numpy import max, zeros, array, sqrt, roots, diag, sum, log
from numpy import sin, cos, cross, ones, concatenate, flipud, dot, isreal
from numpy import linspace, polyval, eye, linalg, mean, prod, vstack
from numpy import ones_like, zeros_like, pi, float64, transpose
from numpy import any as anny
from numpy.linalg import svd, inv, det
from scipy.linalg import norm
from scipy.special import chdtri
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
# from copy import deepcopy as dc
from importlib import reload
from calibration import StereographicCalibration as stereographic
from calibration import UnifiedCalibration as unified
from calibration import RationalCalibration as rational
from calibration import FisheyeCalibration as fisheye
from calibration import PolyCalibration as poly
reload(stereographic)
reload(unified)
reload(rational)
reload(fisheye)
reload(poly)
def f64(x):
return array(x, dtype=float64)
# %% class that holds all data
class syntintr:
def __init__(self, k=None, uv=None, s=None, camera=None, model=None):
self.k = k
self.uv = uv
self.s = s
self.camera = camera
self.model = model
class syntches:
def __init__(self, nIm=None, nPt=None, rVecs=None, tVecs=None, objPt=None,
imgPt=None, imgNse=None):
self.nIm = nIm
self.nPt = nPt
self.rVecs = rVecs
self.tVecs = tVecs
self.objPt = objPt
self.imgPt = imgPt
self.imgNse = imgNse
class syntextr:
def __init__(self, ang=None, h=None, rVecs=None, tVecs=None, objPt=None,
imgPt=None, index10=None, imgNse=None):
self.ang = ang
self.h = h
self.rVecs = rVecs
self.tVecs = tVecs
self.objPt = objPt
self.imgPt = imgPt
self.index10 = index10
self.imgNse = imgNse
class synt:
def __init__(self, Intr=None, Ches=None, Extr=None):
self.Intr = Intr
self.Ches = Ches
self.Extr = Extr
class realches:
def __init__(self, nIm=None, nPt=None, objPt=None, imgPt=None,
imgFls=None):
self.nIm = nIm
self.nPt = nPt
self.objPt = objPt
self.imgPt = imgPt
self.imgFls = imgFls
class realbalk:
def __init__(self, objPt=None, imgPt=None, priorLLA=None, imgFl=None):
self.objPt = objPt
self.imgPt = imgPt
self.priorLLA = priorLLA
self.imgFl = imgFl
class realdete:
def __init__(self, carGPS=None, carIm=None):
self.carGPS = carGPS
self.carIm = carIm
class real:
def __init__(self, Ches=None, Balk=None, Dete=None):
self.Ches = Ches
self.Balk = Balk
self.Dete = Dete
class datafull:
'''
Nested namedtuples that hold the data for the paper
Data
Synt
Intr # listo: SyntIntr
camera 'vcaWide' string camera model
model string indicating camera intrinsic model
['poly', 'rational', 'fisheye', 'stereographic']
s is the image size
k synthetic stereographic parameter
uv = s / 2 is the stereographic optical center
Ches # listo: SyntChes
nIm number of images
nPt number of point in image
objPt chessboard model grid
rVecs synth rotation vectors
tVecs synth translation vectors
imgPt synth corners projected from objPt with synth params
imgNse noise of 1 sigma for the image
Extr # listo: SyntExtr
ang angles of synth pose tables
h heights of synth pose tables
rVecs rotation vectors associated to angles
tVecs translation vectors associated to angles and h
objPt distributed 3D points on the floor
imgPt projected to image
imgNse noise for image detection, sigma 1
index10 indexes to select 10 points well distributed
Real
Ches # listo: RealChes
nIm number of chess images
nPt number of chess points per image
objPt chessboard model, 3D
imgPt detected corners in chessboard images
imgFls list of paths to the chessboard images
Balk
objPt calibration world points, lat lon
imgPt image points for calibration
priorLLA prior lat-lon-altitude
imgFl camera snapshot file
Dete
carGps car gps coordinates
carIm car image detection traces
'''
def __init__(self, Synt=None, Real=None):
self.Synt = Synt
self.Real = Real
# %% Z=0 PROJECTION
def euler(al, be, ga):
    '''
    Returns the rotation matrix for the given Euler angles
    (Craig, p. 42). The rotations are applied in this order:
    ga: about X
    be: about Y
    al: about Z
    '''
    ca, cb, cg = cos([al, be, ga])
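    # Hedged completion (the excerpt is truncated here): the fixed-axis
    # X-Y-Z rotation matrix R = Rz(al) @ Ry(be) @ Rx(ga) described by the
    # docstring; treat this as a sketch rather than the verbatim original.
    sa, sb, sg = sin([al, be, ga])
    return array([[ca * cb, ca * sb * sg - sa * cg, ca * sb * cg + sa * sg],
                  [sa * cb, sa * sb * sg + ca * cg, sa * sb * cg - ca * sg],
                  [-sb, cb * sg, cb * cg]])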
"""Utility functions for plots."""
import natsort
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.signal import medfilt
from nilearn import plotting as ni_plt
from tqdm import tqdm
from pynwb import NWBHDF5IO
from dandi.dandiapi import DandiAPIClient
from nwbwidgets.utils.timeseries import align_by_times, timeseries_time_to_ind
import ndx_events
def prune_clabels(
clabels_orig, targeted=False, targ_tlims=[13, 17], first_val=True, targ_label="Eat"
):
"""Modify coarse behavior labels based on whether
looking at whole day (targeted=False) or specific
hours (targeted=True). When selecting specific
hours, can look at either the first (first_val=True)
or last (first_val=False) label if there are multiple
overlapping activity labels."""
clabels = clabels_orig.copy()
if not targeted:
for i in range(len(clabels_orig)):
lab = clabels_orig.loc[i, "labels"]
if lab[:5] == "Block":
clabels.loc[i, "labels"] = "Blocklist"
elif lab == "":
clabels.loc[i, "labels"] = "Blocklist"
elif lab not in ["Sleep/rest", "Inactive"]:
clabels.loc[i, "labels"] = "Active"
else:
for i in range(len(clabels_orig)):
lab = clabels_orig.loc[i, "labels"]
if targ_label in lab.split(", "):
clabels.loc[i, "labels"] = targ_label
else:
clabels.loc[i, "labels"] = "Blocklist"
# if lab[:5] == 'Block':
# clabels.loc[i, 'labels'] = 'Blocklist'
# elif lab == '':
# clabels.loc[i, 'labels'] = 'Blocklist'
# elif first_val:
# clabels.loc[i, 'labels'] = lab.split(', ')[0]
# else:
# clabels.loc[i, 'labels'] = lab.split(', ')[-1]
if targeted:
start_val, end_val = targ_tlims[0] * 3600, targ_tlims[1] * 3600
clabels = clabels[
(clabels["start_time"] >= start_val) & (clabels["stop_time"] <= end_val)
]
clabels.reset_index(inplace=True)
uni_labs = np.unique(clabels["labels"].values)
return clabels, uni_labs
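# Hedged illustration (toy frame invented for documentation; uncalled): in
# targeted mode, rows whose comma-separated label list contains targ_label
# keep that label and everything else becomes 'Blocklist', restricted to the
# targ_tlims hour window.
def _example_prune_clabels():
    toy = pd.DataFrame({'labels': ['Eat, Talk', 'Sleep/rest'],
                        'start_time': [14 * 3600, 15 * 3600],
                        'stop_time': [15 * 3600, 16 * 3600]})
    clabels, uni_labs = prune_clabels(toy, targeted=True, targ_label='Eat')
    # uni_labs -> array(['Blocklist', 'Eat'], dtype=object)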
def plot_clabels(
clabels,
uni_labs,
targeted=False,
first_val=True,
targ_tlims=[13, 17],
scale_fact=1 / 3600,
bwidth=0.5,
targlab_colind=0,
):
"""Plot coarse labels for one recording day.
Note that the colors for the plots are currently
pre-defined to work for sub-01 day 4."""
# Define colors for each label
act_cols = plt.get_cmap("Reds")(np.linspace(0.15, 0.85, 5))
if targeted:
category_colors = np.array(["w", act_cols[targlab_colind]], dtype=object)
# if first_val:
# category_colors = np.array(['dimgray', act_cols[1], act_cols[2],
# act_cols[0], act_cols[3], act_cols[4]],
# dtype=object)
# else:
# category_colors = np.array(['dimgray', act_cols[1], act_cols[0],
# act_cols[3], act_cols[4]],
# dtype=object)
else:
category_colors = np.array(
[[1, 128 / 255, 178 / 255], "dimgray", "lightgreen", "lightskyblue"],
dtype=object,
)
# Plot each label as a horizontal bar
fig, ax = plt.subplots(figsize=(20, 2), dpi=150)
for i in range(len(uni_labs)):
lab_inds = np.nonzero(uni_labs[i] == clabels["labels"].values)[0]
lab_starts = clabels.loc[lab_inds, "start_time"].values
lab_stops = clabels.loc[lab_inds, "stop_time"].values
lab_widths = lab_stops - lab_starts
rects = ax.barh(
np.ones_like(lab_widths),
lab_widths * scale_fact,
left=lab_starts * scale_fact,
height=bwidth,
label=uni_labs[i],
color=category_colors[i],
)
ax.legend(
ncol=len(uni_labs), bbox_to_anchor=(0, 1), loc="lower left", fontsize="small"
)
# Define x-axis based on if targeted window or not
if targeted:
plt.xlim(targ_tlims)
targ_tlims_int = [int(val) for val in targ_tlims]
plt.xticks(targ_tlims_int)
ax.set_xticklabels(
["{}:00".format(targ_tlims_int[0]), "{}:00".format(targ_tlims_int[-1])]
)
else:
plt.xlim([0, 24])
plt.xticks([0, 12, 24])
ax.set_xticklabels(["0:00", "12:00", "0:00"])
# Remove border lines and show plot
ax.yaxis.set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.show()
return fig
def clabel_table_create(
common_acts, n_parts=12, data_lp="/data2/users/stepeter/files_nwb/downloads/000055/"
):
"""Create table of coarse label durations across participants.
Labels to include in the table are specified by common_acts."""
with DandiAPIClient() as client:
paths = []
for file in client.get_dandiset("000055", "draft").get_assets_with_path_prefix(""):
paths.append(file.path)
paths = natsort.natsorted(paths)
vals_all = np.zeros([n_parts, len(common_acts) + 1])
for part_ind in tqdm(range(n_parts)):
fids = [val for val in paths if "sub-" + str(part_ind + 1).zfill(2) in val]
for fid in fids:
with DandiAPIClient() as client:
asset = client.get_dandiset("000055", "draft").get_asset_by_path(fid)
s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
with NWBHDF5IO(s3_path, mode="r", driver="ros3") as io:
nwb = io.read()
curr_labels = nwb.intervals["epochs"].to_dataframe()
durations = (
curr_labels.loc[:, "stop_time"].values
- curr_labels.loc[:, "start_time"].values
)
# Add up durations of each label
for s, curr_act in enumerate(common_acts):
for i, curr_label in enumerate(curr_labels["labels"].tolist()):
if curr_act in curr_label.split(", "):
vals_all[part_ind, s] += durations[i] / 3600
# Add up total durations of selected labels (avoid double counting)
for i, curr_label in enumerate(curr_labels["labels"].tolist()):
in_lab_grp = False
for sub_lab in curr_label.split(", "):
if sub_lab in common_acts:
in_lab_grp = True
vals_all[part_ind, -1] += durations[i] / 3600 if in_lab_grp else 0
del nwb, io
# Make final table/dataframe
common_acts_col = [val.lstrip("Blocklist (").rstrip(")") for val in common_acts]
df_all = pd.DataFrame(
vals_all.round(1),
index=["P" + str(val + 1).zfill(2) for val in range(n_parts)],
columns=common_acts_col + ["Total"],
)
return df_all
def identify_elecs(group_names):
"""Determine surface v. depth ECoG electrodes"""
is_surf = []
for label in group_names:
if "grid" in label.lower():
is_surf.append(True)
elif label.lower() in ["mhd", "latd", "lmtd", "ltpd"]:
is_surf.append(True) # special cases
elif (label.lower() == "ahd") & ("PHD" not in group_names):
is_surf.append(True) # special case
elif "d" in label.lower():
is_surf.append(False)
else:
is_surf.append(True)
return np.array(is_surf)
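# Hedged illustration (uncalled): grid labels and a few special-cased names
# count as surface; any other label containing 'd' is treated as depth.
def _example_identify_elecs():
    out = identify_elecs(['Grid32', 'LTPD', 'AD', 'FRONT'])
    # -> array([ True,  True, False,  True])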
def load_data_characteristics(nparts=12):
"""Load data characteristics including the number of
good and total ECoG electrodes, hemisphere implanted,
and number of recording days for each participant."""
with DandiAPIClient() as client:
paths = []
for file in client.get_dandiset("000055", "draft").get_assets_with_path_prefix(""):
paths.append(file.path)
paths = natsort.natsorted(paths)
n_elecs_tot, n_elecs_good = [], []
rec_days, hemis, n_elecs_surf_tot, n_elecs_depth_tot = [], [], [], []
n_elecs_surf_good, n_elecs_depth_good = [], []
for part_ind in tqdm(range(nparts)):
fids = [val for val in paths if "sub-" + str(part_ind + 1).zfill(2) in val]
rec_days.append(len(fids))
for fid in fids[:1]:
with DandiAPIClient() as client:
asset = client.get_dandiset("000055", "draft").get_asset_by_path(fid)
s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
with NWBHDF5IO(s3_path, mode="r", driver="ros3") as io:
nwb = io.read()
# Determine good/total electrodes
n_elecs_good.append(np.sum(nwb.electrodes["good"][:]))
n_elecs_tot.append(len(nwb.electrodes["good"][:]))
# Determine implanted hemisphere
c_wrist = (
nwb.processing["behavior"].data_interfaces["ReachEvents"].description[0]
)
hemis.append("L" if c_wrist == "r" else "R")
# Determine surface vs. depth electrode count
is_surf = identify_elecs(nwb.electrodes["group_name"][:])
n_elecs_surf_tot.append(np.sum(is_surf))
n_elecs_depth_tot.append(np.sum(1 - is_surf))
n_elecs_surf_good.append(
np.sum(nwb.electrodes["good"][is_surf.nonzero()[0]])
)
n_elecs_depth_good.append(
np.sum(nwb.electrodes["good"][(1 - is_surf).nonzero()[0]])
)
del nwb, io
part_nums = [val + 1 for val in range(nparts)]
part_ids = ["P" + str(val).zfill(2) for val in part_nums]
return [
rec_days,
hemis,
n_elecs_surf_tot,
n_elecs_surf_good,
n_elecs_depth_tot,
n_elecs_depth_good,
part_nums,
part_ids,
n_elecs_good,
n_elecs_tot,
]
def plot_ecog_descript(
n_elecs_tot,
n_elecs_good,
part_ids,
nparts=12,
allLH=False,
nrows=3,
chan_labels="all",
width=7,
height=3,
):
"""Plot ECoG electrode positions and identified noisy
electrodes side by side."""
with DandiAPIClient() as client:
paths = []
for file in client.get_dandiset("000055", "draft").get_assets_with_path_prefix(""):
paths.append(file.path)
paths = natsort.natsorted(paths)
fig = plt.figure(figsize=(width * 3, height * 3), dpi=150)
# First subplot: electrode locations
ncols = nparts // nrows
gs = gridspec.GridSpec(
nrows=nrows,
ncols=ncols, # +2,
figure=fig,
width_ratios=[width / ncols]
* ncols, # [width/ncols/2]*ncols+[width/10, 4*width/10],
height_ratios=[height / nrows] * nrows,
wspace=0,
hspace=-0.5,
)
ax = [None] * (nparts) # +1)
for part_ind in tqdm(range(nparts)):
# Load NWB data file
fids = [val for val in paths if "sub-" + str(part_ind + 1).zfill(2) in val]
with DandiAPIClient() as client:
asset = client.get_dandiset("000055", "draft").get_asset_by_path(fids[0])
s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
with NWBHDF5IO(s3_path, mode="r", driver="ros3") as io:
nwb = io.read()
# Determine hemisphere to display
if allLH:
sides_2_display = "l"
else:
average_xpos_sign = np.nanmean(nwb.electrodes["x"][:])
sides_2_display = "r" if average_xpos_sign > 0 else "l"
# Run electrode plotting function
ax[part_ind] = fig.add_subplot(gs[part_ind // ncols, part_ind % ncols])
plot_ecog_electrodes_mni_from_nwb_file(
nwb,
chan_labels,
num_grid_chans=64,
node_size=50,
colors="silver",
alpha=0.9,
sides_2_display=sides_2_display,
node_edge_colors="k",
edge_linewidths=1.5,
ax_in=ax[part_ind],
allLH=allLH,
)
del nwb, io
# ax[part_ind].text(-0.2,0.1,'P'+str(part_ind+1).zfill(2), fontsize=8)
# fig.text(0.1, 0.91, '(a) ECoG electrode positions', fontsize=10)
# Second subplot: noisy electrodes per participant
# ax[-1] = fig.add_subplot(gs[:, -1])
# ax[-1].bar(part_ids,n_elecs_tot,color='lightgrey')
# ax[-1].bar(part_ids,n_elecs_good,color='dimgrey')
# ax[-1].spines['right'].set_visible(False)
# ax[-1].spines['top'].set_visible(False)
# ax[-1].set_xticklabels(part_ids, rotation=45)
# ax[-1].legend(['Total','Good'], frameon=False, fontsize=8)
# ax[-1].tick_params(labelsize=9)
# ax[-1].set_ylabel('Number of electrodes', fontsize=9, labelpad=0)
# ax[-1].set_title('(b) Total/good electrodes per participant',
# fontsize=10)
plt.show()
return fig
def plot_ecog_electrodes_mni_from_nwb_file(
nwb_dat,
chan_labels="all",
num_grid_chans=64,
colors=None,
node_size=50,
figsize=(16, 6),
sides_2_display="auto",
node_edge_colors=None,
alpha=0.5,
edge_linewidths=3,
ax_in=None,
rem_zero_chans=False,
allLH=False,
zero_rem_thresh=0.99,
elec_col_suppl=None,
):
"""
Plots ECoG electrodes from MNI coordinate file (only for specified labels)
NOTE: If running in Jupyter, use '%matplotlib inline' instead of '%matplotlib notebook'
"""
# Load channel locations
chan_info = nwb_dat.electrodes.to_dataframe()
# Create dataframe for electrode locations
if chan_labels == "all":
locs = chan_info.loc[:, ["x", "y", "z"]]
elif chan_labels == "allgood":
locs = chan_info.loc[:, ["x", "y", "z", "good"]]
else:
locs = chan_info.loc[chan_labels, ["x", "y", "z"]]
if colors is not None:
if (locs.shape[0] > len(colors)) & isinstance(colors, list):
locs = locs.iloc[: len(colors), :]
# locs.rename(columns={'X':'x','Y':'y','Z':'z'}, inplace=True)
chan_loc_x = chan_info.loc[:, "x"].values
# Remove NaN electrode locations (no location info)
nan_drop_inds = np.nonzero(np.isnan(chan_loc_x))[0]
locs.dropna(axis=0, inplace=True) # remove NaN locations
if (colors is not None) & isinstance(colors, list):
colors_new, loc_inds_2_drop = [], []
for s, val in enumerate(colors):
if not (s in nan_drop_inds):
colors_new.append(val)
else:
loc_inds_2_drop.append(s)
colors = colors_new.copy()
if elec_col_suppl is not None:
loc_inds_2_drop.reverse() # go from high to low values
for val in loc_inds_2_drop:
del elec_col_suppl[val]
if chan_labels == "allgood":
goodChanInds = chan_info.loc[:, "good", :]
inds2drop = np.nonzero(locs["good"] == 0)[0]
locs.drop(columns=["good"], inplace=True)
locs.drop(locs.index[inds2drop], inplace=True)
if colors is not None:
colors_new, loc_inds_2_drop = [], []
for s, val in enumerate(colors):
if not (s in inds2drop):
# np.all(s!=inds2drop):
colors_new.append(val)
else:
loc_inds_2_drop.append(s)
colors = colors_new.copy()
if elec_col_suppl is not None:
loc_inds_2_drop.reverse() # go from high to low values
for val in loc_inds_2_drop:
del elec_col_suppl[val]
if rem_zero_chans:
# Remove channels with zero values (white colors)
colors_new, loc_inds_2_drop = [], []
for s, val in enumerate(colors):
if np.mean(val) < zero_rem_thresh:
colors_new.append(val)
else:
loc_inds_2_drop.append(s)
colors = colors_new.copy()
locs.drop(locs.index[loc_inds_2_drop], inplace=True)
if elec_col_suppl is not None:
loc_inds_2_drop.reverse() # go from high to low values
for val in loc_inds_2_drop:
del elec_col_suppl[val]
# Decide whether to plot L or R hemisphere based on x coordinates
if len(sides_2_display) > 1:
N, axes, sides_2_display = _setup_subplot_view(locs, sides_2_display, figsize)
else:
N = 1
axes = ax_in
if allLH:
average_xpos_sign = np.mean(np.asarray(locs["x"]))
if average_xpos_sign > 0:
locs["x"] = -locs["x"]
sides_2_display = "l"
if colors is None:
colors = list()
# Label strips/depths differently for easier visualization (or use defined color list)
if len(colors) == 0:
for s in range(locs.shape[0]):
if s >= num_grid_chans:
colors.append("r")
else:
colors.append("b")
if elec_col_suppl is not None:
colors = elec_col_suppl.copy()
# Rearrange to plot non-grid electrode first
if num_grid_chans > 0: # isinstance(colors, list):
locs2 = locs.copy()
locs2["x"] = np.concatenate(
(locs["x"][num_grid_chans:], locs["x"][:num_grid_chans]), axis=0
)
locs2["y"] = np.concatenate(
(locs["y"][num_grid_chans:], locs["y"][:num_grid_chans]), axis=0
)
locs2["z"] = np.concatenate(
(locs["z"][num_grid_chans:], locs["z"][:num_grid_chans]), axis=0
)
if isinstance(colors, list):
colors2 = colors.copy()
colors2 = colors[num_grid_chans:] + colors[:num_grid_chans]
else:
colors2 = colors
else:
locs2 = locs.copy()
if isinstance(colors, list):
colors2 = colors.copy()
else:
colors2 = colors # [colors for i in range(locs2.shape[0])]
# Plot the result
_plot_electrodes(
locs2,
node_size,
colors2,
axes,
sides_2_display,
N,
node_edge_colors,
alpha,
edge_linewidths,
)
def _plot_electrodes(
locs,
node_size,
colors,
axes,
sides_2_display,
N,
node_edge_colors,
alpha,
edge_linewidths,
marker="o",
):
"""
Handles plotting of electrodes.
"""
if N == 1:
ni_plt.plot_connectome(
np.eye(locs.shape[0]),
locs,
output_file=None,
node_kwargs={
"alpha": alpha,
"edgecolors": node_edge_colors,
"linewidths": edge_linewidths,
"marker": marker,
},
node_size=node_size,
node_color=colors,
axes=axes,
display_mode=sides_2_display,
)
elif sides_2_display == "yrz" or sides_2_display == "ylz":
colspans = [
5,
6,
5,
] # different sized subplot to make saggital view similar size to other two slices
current_col = 0
total_colspans = int(np.sum(np.asarray(colspans)))
for ind, colspan in enumerate(colspans):
axes[ind] = plt.subplot2grid(
(1, total_colspans), (0, current_col), colspan=colspan, rowspan=1
)
ni_plt.plot_connectome(
np.eye(locs.shape[0]),
locs,
output_file=None,
node_kwargs={
"alpha": alpha,
"edgecolors": node_edge_colors,
"linewidths": edge_linewidths,
"marker": marker,
},
node_size=node_size,
node_color=colors,
axes=axes[ind],
display_mode=sides_2_display[ind],
)
current_col += colspan
else:
for i in range(N):
ni_plt.plot_connectome(
np.eye(locs.shape[0]),
locs,
output_file=None,
node_kwargs={
"alpha": alpha,
"edgecolors": node_edge_colors,
"linewidths": edge_linewidths,
"marker": marker,
},
node_size=node_size,
node_color=colors,
axes=axes[i],
display_mode=sides_2_display[i],
)
def plot_ecog_pow(
lp,
rois_plt,
freq_range,
sbplt_titles,
part_id="P01",
n_parts=12,
nrows=2,
ncols=4,
figsize=(7, 4),
):
"""Plot ECoG projected spectral power."""
fig, ax = plt.subplots(nrows, ncols, figsize=figsize, dpi=150)
# Plot projected power for all participants
fig, ax = _ecog_pow_group(
fig,
ax,
lp,
rois_plt,
freq_range,
sbplt_titles,
n_parts,
nrows,
ncols,
row_ind=0,
)
# Plot projected power for 1 participant
fig, ax = _ecog_pow_single(
fig,
ax,
lp,
rois_plt,
freq_range,
sbplt_titles,
n_parts,
nrows,
ncols,
row_ind=1,
part_id=part_id,
)
fig.tight_layout()
plt.show()
def _ecog_pow_group(
fig,
ax,
lp,
rois_plt,
freq_range,
sbplt_titles,
n_parts=12,
nrows=2,
ncols=4,
row_ind=0,
):
"""Plot projected power for all participants."""
freqs_vals = np.arange(freq_range[0], freq_range[1] + 1).tolist()
fig.subplots_adjust(hspace=0.5)
fig.subplots_adjust(wspace=0.1)
power, freqs, parts = [], [], []
n_wins_sbj = []
for k, roi in enumerate(rois_plt):
power_roi, freqs_roi, parts_roi = [], [], []
for j in range(n_parts):
dat = np.load(lp + "P" + str(j + 1).zfill(2) + "_" + roi + ".npy")
dat = 10 * np.log10(dat)
for i in range(dat.shape[0]):
power_roi.extend(dat[i, :].tolist())
freqs_roi.extend(freqs_vals)
parts_roi.extend(["P" + str(j + 1).zfill(2)] * len(freqs_vals))
if k == 0:
n_wins_sbj.append(dat.shape[0])
power.extend(power_roi)
freqs.extend(freqs_roi)
parts.extend(parts_roi)
parts_uni = np.unique(np.asarray(parts_roi))[::-1].tolist()
df_roi = pd.DataFrame(
{"Power": power_roi, "Freqs": freqs_roi, "Parts": parts_roi}
)
col = k % ncols
ax_curr = ax[row_ind, col] if nrows > 1 else ax[col]
leg = False # 'brief' if k==3 else False
sns.lineplot(
data=df_roi,
x="Freqs",
y="Power",
hue="Parts",
ax=ax_curr,
ci="sd",
legend=leg,
palette=["darkgray"] * len(parts_uni),
hue_order=parts_uni,
) # palette='Blues'
# ax_curr.set_xscale('log')
ax_curr.set_xlim(freq_range)
ax_curr.set_ylim([-20, 30])
ax_curr.spines["right"].set_visible(False)
ax_curr.spines["top"].set_visible(False)
ax_curr.set_xlim(freq_range)
ax_curr.set_xticks(
[freq_range[0]] + np.arange(20, 101, 20).tolist() + [freq_range[1]]
)
ylab = "" # '' if k%ncols > 0 else 'Power\n(dB)' # 10log(uV^2)
xlab = "" # 'Frequency (Hz)' if k//ncols==(nrows-1) else ''
ax_curr.set_ylabel(ylab, rotation=0, labelpad=15, fontsize=9)
ax_curr.set_xlabel(xlab, fontsize=9)
if k % ncols > 0:
l_yticks = len(ax_curr.get_yticklabels())
ax_curr.set_yticks(ax_curr.get_yticks().tolist())
ax_curr.set_yticklabels([""] * l_yticks)
ax_curr.tick_params(axis="both", which="major", labelsize=8)
ax_curr.set_title(sbplt_titles[k], fontsize=9)
return fig, ax
def _ecog_pow_single(
fig,
ax,
lp,
rois_plt,
freq_range,
sbplt_titles,
n_parts=12,
nrows=2,
ncols=4,
row_ind=1,
part_id="P01",
):
"""Plot projected power for a single participant."""
part_id = "P01"
freqs_vals = np.arange(freq_range[0], freq_range[1] + 1).tolist()
power, freqs, parts = [], [], []
n_wins_sbj = []
for k, roi in enumerate(rois_plt):
power_roi, freqs_roi, parts_roi = [], [], []
dat = np.load(lp + part_id + "_" + roi + ".npy")
dat = 10 * np.log10(dat)
for i in range(dat.shape[0]):
power_roi.extend(dat[i, :].tolist())
freqs_roi.extend(freqs_vals)
parts_roi.extend([i] * len(freqs_vals))
if k == 0:
n_wins_sbj.append(dat.shape[0])
power.extend(power_roi)
freqs.extend(freqs_roi)
parts.extend(parts_roi)
parts_uni = np.unique(np.asarray(parts_roi))[::-1].tolist()
df_roi = pd.DataFrame(
{"Power": power_roi, "Freqs": freqs_roi, "Parts": parts_roi}
)
col = k % ncols
ax_curr = ax[row_ind, col] if nrows > 1 else ax[col]
leg = False # 'brief' if k==3 else False
sns.lineplot(
data=df_roi,
x="Freqs",
y="Power",
hue="Parts",
ax=ax_curr,
ci=None,
legend=leg,
palette=["darkgray"] * len(parts_uni),
hue_order=parts_uni,
linewidth=0.2,
) # palette='Blues'
ax_curr.set_xlim(freq_range)
ax_curr.set_ylim([-20, 30])
ax_curr.spines["right"].set_visible(False)
ax_curr.spines["top"].set_visible(False)
ax_curr.set_xlim(freq_range)
ax_curr.set_xticks(
[freq_range[0]] + np.arange(20, 101, 20).tolist() + [freq_range[1]]
)
ylab = "" # '' if k%ncols > 0 else 'Power\n(dB)' # 10log(uV^2)
xlab = "" # 'Frequency (Hz)' if k//ncols==(nrows-1) else ''
ax_curr.set_ylabel(ylab, rotation=0, labelpad=15, fontsize=9)
ax_curr.set_xlabel(xlab, fontsize=9)
if k % ncols > 0:
l_yticks = len(ax_curr.get_yticklabels())
ax_curr.set_yticks(ax_curr.get_yticks().tolist())
ax_curr.set_yticklabels([""] * l_yticks)
ax_curr.tick_params(axis="both", which="major", labelsize=8)
ax_curr.set_title(sbplt_titles[k], fontsize=9)
return fig, ax
def plot_dlc_recon_errs(fig, ax):
"""Plots DeepLabCut reconstruction errors on training and heldout
images. This information is not present in the NWB files."""
# DLC reconstruction errors [train set, holdout set]
sbj_d = {
"P01": [1.45, 4.27],
"P02": [1.44, 3.58],
"P03": [1.58, 6.95],
"P04": [1.63, 6.02],
"P05": [1.43, 3.42],
"P06": [1.43, 6.63],
"P07": [1.51, 5.45],
"P08": [1.84, 10.35],
"P09": [1.4, 4.05],
"P10": [1.48, 7.59],
"P11": [1.51, 5.45],
"P12": [1.52, 4.73],
}
train_err = [val[0] for key, val in sbj_d.items()]
test_err = [val[1] for key, val in sbj_d.items()]
nsbjs = len(train_err)
sbj_nums = [val + 1 for val in range(nsbjs)]
sbj = ["P" + str(val).zfill(2) for val in sbj_nums]
# Create plot
ax.bar(sbj, train_err, color="dimgrey")
ax.bar(sbj, test_err, color="lightgrey")
ax.bar(sbj, train_err, color="dimgrey")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_xticklabels(sbj, rotation=45)
ax.legend(["Train set", "Holdout set"], frameon=False, fontsize=8)
ax.tick_params(labelsize=9)
ax.set_ylabel("Reconstruction error (pixels)")
ax.set_title("(a) Pose estimation model errors", fontsize=10)
def plot_wrist_trajs(
fig,
ax,
lp=None,
base_start=-1.5,
base_end=-1,
before=3,
after=3,
fs_video=30,
n_parts=12,
):
"""Plot contralateral wrist trajectories during move onset events."""
df_pose, part_lst = _get_wrist_trajs(
base_start, base_end, before, after, fs_video, n_parts
)
df_pose_orig = df_pose.copy()
df_pose = df_pose_orig.loc[df_pose["Contra"] == "contra", :]
# Set custom color palette
sns.set_palette(sns.color_palette(["gray"]))
    uni_sbj = np.unique(np.asarray(part_lst))
# -*- coding: utf-8 -*-
"""
Geophysics for ageobot.
TODO: Move some of this to Bruges.
"""
import numpy as np
from PIL import ImageStat
def is_greyscale(im):
stat = ImageStat.Stat(im)
    # the mean of the R, G, B channel sums equals the R sum only when all
    # three channels are identical
    if sum(stat.sum[:3]) / 3 == stat.sum[0]:
return True
return False
def hilbert(s, phi=0):
"""
Optional phase shift phi in degrees.
I don't understand why I need to handle the
real and complex parts separately.
"""
n = s.size
m = int(np.ceil((n + 1) / 2))
r0 = np.exp(1j * np.radians(phi))
# Real part.
rr = np.ones(n, dtype=complex)
rr[:m] = r0
rr[m+1:] = np.conj(r0)
# Imag part.
ri = np.ones(n, dtype=complex)
ri[:m] = r0
ri[m+1:] = -1 * r0
_Sr = rr * np.fft.fft(s)
_Si = ri * np.fft.fft(s)
hr = np.fft.ifft(_Sr)
hi = np.fft.ifft(_Si)
h = np.zeros_like(hr, dtype=complex)
h += hr.real + hi.imag * 1j
return h
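# Hedged check (uncalled, illustrative): for a pure cosine whose frequency
# sits exactly on an FFT bin, the imaginary part returned here should be the
# matching sine, i.e. a 90-degree phase-shifted copy.
def _example_hilbert_quadrature():
    t = np.arange(256)
    s = np.cos(2 * np.pi * 8 * t / 256)
    h = hilbert(s)
    assert np.allclose(h.imag, np.sin(2 * np.pi * 8 * t / 256))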
def trim_mean(i, proportion):
"""
Trim mean, roughly emulating scipy.stats.trim_mean().
Must deal with arrays or lists.
"""
a = np.sort(np.array(i))
    k = int(np.floor(a.size * proportion))
    if k == 0:
        return np.nanmean(a)  # nothing to trim; a[k:-k] would be empty
    return np.nanmean(a[k:-k])
def parabolic(f, x):
"""
Interpolation.
"""
x = int(x)
f = np.concatenate([f, [f[-1]]])
xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
return (xv, yv)
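# Hedged illustration (uncalled): refining a sampled peak. For a symmetric
# triple around index 2 the parabola vertex coincides with the sample itself;
# asymmetric neighbours shift xv off the integer grid.
def _example_parabolic():
    xv, yv = parabolic(np.array([2.0, 4.0, 5.0, 4.0, 2.0]), 2)
    # xv -> 2.0, yv -> 5.0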
def freq_from_crossings(sig, fs):
"""
Dominant frequency from zero-crossings.
"""
indices, = np.where((sig[1:] >= 0) & (sig[:-1] < 0))
crossings = [i - sig[i] / (sig[i+1] - sig[i]) for i in indices]
print("************* xings", crossings)
return fs / np.mean(np.diff(crossings))
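# Hedged check (uncalled, illustrative): a clean 50 Hz sine sampled at 1 kHz
# has upward zero-crossings every 20 samples, so the estimate is ~50 Hz.
def _example_freq_from_crossings():
    fs = 1000.0
    t = np.arange(0, 1, 1 / fs)
    assert abs(freq_from_crossings(np.sin(2 * np.pi * 50 * t), fs) - 50.0) < 0.5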
def freq_from_autocorr(sig, fs):
"""
Dominant frequency from autocorrelation.
"""
sig = sig + 128
corr = np.convolve(sig, sig[::-1], mode='full')
corr = corr[int(len(corr)/2):]
d = np.diff(corr)
start = (d > 0).nonzero()[0][0] # nonzero() returns a tuple
peak = np.argmax(corr[int(start):]) + start
px, py = parabolic(corr, peak)
return fs / px
def get_spectrum(signal, fs):
windowed = signal * np.blackman(len(signal))
a = abs(np.fft.rfft(windowed))
f = np.fft.rfftfreq(len(signal), 1/fs)
db = 20 * np.log10(a)
sig = db - np.amax(db) + 20
    indices, = ((sig[1:] >= 0) & (sig[:-1] < 0)).nonzero()  # unpack nonzero()'s tuple
crossings = [z - sig[z] / (sig[z+1] - sig[z]) for z in indices]
try:
        mi, ma = np.amin(crossings), np.amax(crossings)
# Copyright 2016 Sandia Corporation and the National Renewable Energy
# Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The Extreme Sea State Contour (ESSC) module contains the tools necessary to
calculate environmental contours of extreme sea states for buoy data.
'''
import numpy as np
import scipy.stats as stats
import scipy.optimize as optim
import scipy.interpolate as interp
import matplotlib.pyplot as plt
import h5py
from sklearn.decomposition import PCA as skPCA
import requests
import bs4
import urllib.request
import re
from datetime import datetime, date
import os
import glob
import copy
import statsmodels.api as sm
from statsmodels import robust
import urllib
import matplotlib
class EA:
'''The Environmental Assessment (EA) class points to functions for
various contour methods (including getContours and getSamples) and allows
the user to plot results (plotData), sample along the contour
(getContourPoints), calculate the wave breaking steepness curve (steepness)
and/or use the bootstrap method to calculate 95% confidence bounds about
the contours (bootStrap).'''
    def __init__(self):
        return
    def getContours(self):
        '''Points to the getContours function in whatever contouring method is used'''
        return
    def getSamples(self):
        '''Points to the getSamples function in whatever contouring method is
        used, currently only implemented for PCA contours. Implementation for
        additional contour methods planned for future release.'''
        return
def saveContour(self, fileName=None):
'''Saves all available contour data obtained via the EA module to
a .h5 file
Parameters
----------
fileName : string
relevent path and filename where the .h5 file will be created and
saved. If no filename, the h5 file will be named NDBC(buoyNum).h5
'''
if (fileName is None):
fileName = 'NDBC' + str(self.buoy.buoyNum) + '.h5'
else:
_, file_extension = os.path.splitext(fileName)
if not file_extension:
fileName = fileName + '.h5'
        print(fileName)
with h5py.File(fileName, 'a') as f:
if('method' in f):
f['method'][...] = self.method
else:
f.create_dataset('method', data=self.method)
if('parameters' in f):
gp = f['parameters']
else:
gp = f.create_group('parameters')
self._saveParams(gp)
if(self.Hs_ReturnContours is not None):
if('ReturnContours' in f):
grc = f['ReturnContours']
else:
grc = f.create_group('ReturnContours')
if('T_Return' in grc):
f_T_Return = grc['T_Return']
f_T_Return[...] = self.T_ReturnContours
else:
f_T_Return = grc.create_dataset('T_Return', data=self.T_ReturnContours)
f_T_Return.attrs['units'] = 's'
f_T_Return.attrs['description'] = 'contour, energy period'
if('Hs_Return' in grc):
f_Hs_Return = grc['Hs_Return']
f_Hs_Return[...] = self.Hs_ReturnContours
else:
f_Hs_Return = grc.create_dataset('Hs_Return', data=self.Hs_ReturnContours)
f_Hs_Return.attrs['units'] = 'm'
f_Hs_Return.attrs['description'] = 'contours, significant wave height'
# Samples for full sea state long term analysis
if(hasattr(self, 'Hs_SampleFSS') and self.Hs_SampleFSS is not None):
if('Samples_FullSeaState' in f):
gfss = f['Samples_FullSeaState']
else:
gfss = f.create_group('Samples_FullSeaState')
if('Hs_SampleFSS' in gfss):
f_Hs_SampleFSS = gfss['Hs_SampleFSS']
f_Hs_SampleFSS[...] = self.Hs_SampleFSS
else:
f_Hs_SampleFSS = gfss.create_dataset('Hs_SampleFSS', data=self.Hs_SampleFSS)
f_Hs_SampleFSS.attrs['units'] = 'm'
f_Hs_SampleFSS.attrs['description'] = 'full sea state significant wave height samples'
if('T_SampleFSS' in gfss):
f_T_SampleFSS = gfss['T_SampleFSS']
f_T_SampleFSS[...] = self.T_SampleFSS
else:
f_T_SampleFSS = gfss.create_dataset('T_SampleFSS', data=self.T_SampleFSS)
f_T_SampleFSS.attrs['units'] = 's'
f_T_SampleFSS.attrs['description'] = 'full sea state energy period samples'
if('Weight_SampleFSS' in gfss):
f_Weight_SampleFSS = gfss['Weight_SampleFSS']
f_Weight_SampleFSS[...] = self.Weight_SampleFSS
else:
f_Weight_SampleFSS = gfss.create_dataset('Weight_SampleFSS', data = self.Weight_SampleFSS)
f_Weight_SampleFSS.attrs['description'] = 'full sea state relative weighting samples'
# Samples for contour approach long term analysis
if(hasattr(self, 'Hs_SampleCA') and self.Hs_SampleCA is not None):
if('Samples_ContourApproach' in f):
gca = f['Samples_ContourApproach']
else:
gca = f.create_group('Samples_ContourApproach')
if('Hs_SampleCA' in gca):
f_Hs_sampleCA = gca['Hs_SampleCA']
f_Hs_sampleCA[...] = self.Hs_SampleCA
else:
f_Hs_sampleCA = gca.create_dataset('Hs_SampleCA', data=self.Hs_SampleCA)
f_Hs_sampleCA.attrs['units'] = 'm'
f_Hs_sampleCA.attrs['description'] = 'contour approach significant wave height samples'
if('T_SampleCA' in gca):
f_T_sampleCA = gca['T_SampleCA']
f_T_sampleCA[...] = self.T_SampleCA
else:
f_T_sampleCA = gca.create_dataset('T_SampleCA', data=self.T_SampleCA)
f_T_sampleCA.attrs['units'] = 's'
f_T_sampleCA.attrs['description'] = 'contour approach energy period samples'
def plotData(self):
"""
Display a plot of the 100-year return contour, full sea state samples
and contour samples
"""
plt.figure()
plt.plot(self.buoy.T, self.buoy.Hs, 'bo', alpha=0.1, label='NDBC data')
plt.plot(self.T_ReturnContours, self.Hs_ReturnContours, 'k-', label='100 year contour')
#plt.plot(self.T_SampleFSS, self.Hs_SampleFSS, 'ro', label='full sea state samples')
#plt.plot(self.T_SampleCA, self.Hs_SampleCA, 'y^', label='contour approach samples')
plt.legend(loc='lower right', fontsize='small')
plt.grid(True)
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.show()
def getContourPoints(self, T_Sample):
'''Get Hs points along a specified environmental contour using
user-defined T values.
Parameters
----------
T_Sample : nparray
points for sampling along return contour
Returns
-------
Hs_SampleCA : nparray
points sampled along return contour
Example
-------
To calculate Hs values along the contour at specific
user-defined T values:
import WDRT.ESSC as ESSC
import numpy as np
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Generate contour
Hs_Return, T_Return = pca46022.getContours(Time_SS, Time_r,nb_steps)
# Use getContourPoints to find specific points along the contour
T_sampleCA = np.arange(12, 26, 2)
Hs_sampleCA = pca46022.getContourPoints(T_sampleCA)
'''
#finds minimum and maximum energy period values
amin = np.argmin(self.T_ReturnContours)
amax = np.argmax(self.T_ReturnContours)
#finds points along the contour
w1 = self.Hs_ReturnContours[amin:amax]
w2 = np.concatenate((self.Hs_ReturnContours[amax:], self.Hs_ReturnContours[:amin]))
if (np.max(w1) > np.max(w2)):
x1 = self.T_ReturnContours[amin:amax]
            y1 = self.Hs_ReturnContours[amin:amax]
else:
x1 = np.concatenate((self.T_ReturnContours[amax:], self.T_ReturnContours[:amin]))
y1 = np.concatenate((self.Hs_ReturnContours[amax:], self.Hs_ReturnContours[:amin]))
#sorts data based on the max and min energy period values
ms = np.argsort(x1)
x = x1[ms]
y = y1[ms]
#interpolates the sorted data
si = interp.interp1d(x, y)
#finds the wave height based on the user specified energy period values
Hs_SampleCA = si(T_Sample)
self.T_SampleCA = T_Sample
self.Hs_SampleCA = Hs_SampleCA
return Hs_SampleCA
def steepness(self, SteepMax, T_vals, depth = None):
'''This function calculates a steepness curve to be plotted on an H vs. T
diagram. First, the function calculates the wavelength based on the
depth and T. The T vector can be the input data vector, or will be
created below to cover the span of possible T values.
The function solves the dispersion relation for water waves
using the Newton-Raphson method. All outputs are solved for exactly
        using: :math:`h\omega^2/g = kh\tanh(kh)`
Approximations that could be used in place of this code for deep
and shallow water, as appropriate:
deep water: :math:`h/\lambda \geq 1/2, tanh(kh) \sim 1, \lambda = (gT^2)/(2\pi)`
        shallow water: :math:`h/\lambda \leq 1/20, tanh(kh) \sim kh, \lambda = T\sqrt{gh}`
Parameters
----------
SteepMax: float
Wave breaking steepness estimate (e.g., 0.07).
T_vals :np.array
Array of T values [sec] at which to calculate the breaking height.
depth: float
Depth at site
Note: if not inputted, the depth will tried to be grabbed from the respective
buoy type's website.
Returns
-------
SteepH: np.array
H values [m] that correspond to the T_mesh values creating the
steepness curve.
T_steep: np.array
T values [sec] over which the steepness curve is defined.
Example
-------
To find limit the steepness of waves on a contour by breaking:
import numpy as np
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
T_vals = np.arange(0.1, np.amax(buoy46022.T), 0.1)
# Enter estimate of breaking steepness
SteepMax = 0.07 # Reference DNV-RP-C205
# Declare required parameters
depth = 391.4 # Depth at measurement point (m)
SteepH = pca46022.steepness(depth,SteepMax,T_vals)
'''
# Calculate the wavelength at a given depth at each value of T
        if depth is None:
            depth = self.__fetchDepth()
        g = 9.81  # [m/s^2]
        omega = ((2 * np.pi) / T_vals)
        lambdaT = []
for i in range(len(T_vals)):
# Initialize kh using Eckart 1952 (mentioned in Holthuijsen pg. 124)
kh = (omega[i]**2) * depth / \
(g * (np.tanh((omega[i]**2) * depth / g)**0.5))
# Find solution using the Newton-Raphson Method
for j in range(1000):
kh0 = kh
f0 = (omega[i]**2) * depth / g - kh0 * np.tanh(kh0)
df0 = -np.tanh(kh) - kh * (1 - np.tanh(kh)**2)
kh = -f0 / df0 + kh0
f = (omega[i]**2) * depth / g - kh * np.tanh(kh)
if abs(f0 - f) < 10**(-6):
break
lambdaT.append((2 * np.pi) / (kh / depth))
del kh, kh0
        lambdaT = np.array(lambdaT, dtype=float)  # the np.float alias was removed from NumPy
SteepH = lambdaT * SteepMax
return SteepH
def __fetchDepth(self):
'''Obtains the depth from the website for a buoy (either NDBC or CDIP)'''
if self.buoy.buoyType == "NDBC":
url = "https://www.ndbc.noaa.gov/station_page.php?station=%s" % (46022)
ndbcURL = requests.get(url)
ndbcURL.raise_for_status()
ndbcHTML = bs4.BeautifulSoup(ndbcURL.text, "lxml")
header = ndbcHTML.find("b", text="Water depth:")
return float(str(header.nextSibling).split()[0])
elif self.buoy.buoyType == "CDIP":
url = "http://cdip.ucsd.edu/cgi-bin/wnc_metadata?ARCHIVE/%sp1/%sp1_historic" % (self.buoy.buoyNum, self.buoy.buoyNum)
cdipURL = requests.get(url)
cdipURL.raise_for_status()
cdipHTML = bs4.BeautifulSoup(cdipURL.text, "lxml")
#Parse the table for the depth value
depthString = str(cdipHTML.findChildren("td", {"class" : "plus"})[0])
depthString = depthString.split("<br/>")[2]
return float(re.findall(r"[-+]?\d*\.\d+|\d+", depthString)[0])
def bootStrap(self, boot_size=1000, plotResults=True):
'''Get 95% confidence bounds about a contour using the bootstrap
method. Warning - this function is time consuming. Computation
time depends on selected boot_size.
Parameters
----------
boot_size: int (optional)
Number of bootstrap samples that will be used to calculate 95%
confidence interval. Should be large enough to calculate stable
statistics. If left blank will be set to 1000.
plotResults: boolean (optional)
Option for showing plot of bootstrap confidence bounds. If left
blank will be set to True and plot will be shown.
Returns
-------
contourmean_Hs : nparray
Hs values for mean contour calculated as the average over all
bootstrap contours.
contourmean_T : nparray
T values for mean contour calculated as the average over all
bootstrap contours.
Example
-------
To generate 95% boostrap contours for a given contour method:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Contour generation
Hs_Return, T_Return = pca46022.getContours(Time_SS, Time_r,nb_steps)
# Calculate boostrap confidence interval
contourmean_Hs, contourmean_T = pca46022.bootStrap(boot_size=10)
'''
if (self.method == "Bivariate KDE, Log Transform" or
self.method == "Bivariate KDE"):
msg = 'WDRT does not support the bootstrap method for this Bivariate KDE (See Issue #47).'
print(msg)
return None, None
#preallocates arrays
n = len(self.buoy.Hs)
Hs_Return_Boot = np.zeros([self.nb_steps,boot_size])
T_Return_Boot = np.zeros([self.nb_steps,boot_size])
buoycopy = copy.deepcopy(self.buoy);
#creates copies of the data based on how it was modeled.
for i in range(boot_size):
boot_inds = np.random.randint(0, high=n, size=n)
buoycopy.Hs = copy.deepcopy(self.buoy.Hs[boot_inds])
buoycopy.T = copy.deepcopy(self.buoy.T[boot_inds])
essccopy=None
if self.method == "Principle component analysis":
essccopy = PCA(buoycopy, self.size_bin)
elif self.method == "Gaussian Copula":
essccopy = GaussianCopula(buoycopy, self.n_size, self.bin_1_limit, self.bin_step)
elif self.method == "Rosenblatt":
essccopy = Rosenblatt(buoycopy, self.n_size, self.bin_1_limit, self.bin_step)
elif self.method == "Clayton Copula":
essccopy = ClaytonCopula(buoycopy, self.n_size, self.bin_1_limit, self.bin_step)
elif self.method == "Gumbel Copula":
essccopy = GumbelCopula(buoycopy, self.n_size, self.bin_1_limit, self.bin_step, self.Ndata)
elif self.method == "Non-parametric Gaussian Copula":
essccopy = NonParaGaussianCopula(buoycopy, self.Ndata, self.max_T, self.max_Hs)
elif self.method == "Non-parametric Clayton Copula":
essccopy = NonParaClaytonCopula(buoycopy, self.Ndata, self.max_T, self.max_Hs)
elif self.method == "Non-parametric Gumbel Copula":
essccopy = NonParaGumbelCopula(buoycopy, self.Ndata, self.max_T, self.max_Hs)
Hs_Return_Boot[:,i],T_Return_Boot[:,i] = essccopy.getContours(self.time_ss, self.time_r, self.nb_steps)
#finds 95% CI values for wave height and energy
contour97_5_Hs = np.percentile(Hs_Return_Boot,97.5,axis=1)
contour2_5_Hs = np.percentile(Hs_Return_Boot,2.5,axis=1)
contourmean_Hs = np.mean(Hs_Return_Boot, axis=1)
contour97_5_T = np.percentile(T_Return_Boot,97.5,axis=1)
contour2_5_T = np.percentile(T_Return_Boot,2.5,axis=1)
contourmean_T = np.mean(T_Return_Boot, axis=1)
self.contourMean_Hs = contourmean_Hs
self.contourMean_T = contourmean_T
#plotting function
        def _plotResults():  # local helper, named apart from the boolean flag
plt.figure()
plt.plot(self.buoy.T, self.buoy.Hs, 'bo', alpha=0.1, label='NDBC data')
plt.plot(self.T_ReturnContours, self.Hs_ReturnContours, 'k-', label='100 year contour')
plt.plot(contour97_5_T, contour97_5_Hs, 'r--', label='95% bootstrap confidence interval')
plt.plot(contour2_5_T, contour2_5_Hs, 'r--')
plt.plot(contourmean_T, contourmean_Hs, 'r-', label='Mean bootstrap contour')
plt.legend(loc='lower right', fontsize='small')
plt.grid(True)
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.show()
        if plotResults:
            _plotResults()
return contourmean_Hs, contourmean_T
def outsidePoints(self):
'''Determines which buoy observations are outside of a given contour.
Parameters
----------
None
Returns
-------
outsideHs : nparray
The Hs values of the observations that are outside of the contour
outsideT : nparray
The T values of the observations that are outside of the contour
Example
-------
To get correseponding T and Hs arrays of observations that are outside
of a given contour:
import WDRT.ESSC as ESSC
import numpy as np
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
rosen46022 = ESSC.Rosenblatt(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
# Generate contour
Hs_Return, T_Return = rosen46022.getContours(Time_SS, Time_r)
# Return the outside point Hs/T combinations
outsideT, outsideHs = rosen46022.outsidePoints()
'''
#checks if the contour type is a KDE contour - if so, finds the outside points for the KDE contour.
if isinstance(self.T_ReturnContours,list):
contains_test = np.zeros(len(self.buoy.T),dtype=bool)
for t,hs in zip(self.T_ReturnContours,self.Hs_ReturnContours):
path_contour = []
path_contour = matplotlib.path.Path(np.column_stack((t,hs)))
contains_test = contains_test+path_contour.contains_points(np.column_stack((self.buoy.T,self.buoy.Hs)))
out_inds = np.where(~contains_test)
else: # For non-KDE methods (copulas, etc.)
path_contour = matplotlib.path.Path(np.column_stack((self.T_ReturnContours,self.Hs_ReturnContours)))
contains_test = path_contour.contains_points(np.column_stack((self.buoy.T,self.buoy.Hs)))
out_inds = np.where(~contains_test)
outsideHs =self.buoy.Hs[out_inds]
outsideT = self.buoy.T[out_inds]
return(outsideT, outsideHs)
def contourIntegrator(self):
'''Calculates the area of the contour over the two-dimensional input
space of interest.
Parameters
----------
None
Returns
-------
area : float
The area of the contour in TxHs units.
Example
-------
To obtain the area of the contour:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
rosen46022 = ESSC.Rosenblatt(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
# Generate contour
Hs_Return, T_Return = rosen46022.getContours(Time_SS, Time_r)
# Return the area of the contour
rosenArea = rosen46022.contourIntegrator()
'''
contourTs = self.T_ReturnContours
contourHs = self.Hs_ReturnContours
        # Shoelace formula for the area of the polygon traced by the contour
        area = 0.5*np.abs(np.dot(contourTs,np.roll(contourHs,1))-np.dot(contourHs,np.roll(contourTs,1)))
return area
def dataContour(self, tStepSize = 1, hsStepSize = .5):
'''Creates a contour around the ordered pairs of buoy observations. How tightly
the contour fits around the data will be determined by step size parameters.
Please note that this function currently is in beta; it needs further work to be
optimized for use.
Parameters
----------
tStepSize : float
Determines how far to search for the next point in the T direction.
Smaller values will produce contours that follow the data more closely.
hsStepSize : float
Determines how far to search for the next point in the Hs direction.
Smaller values will produce contours that follow the data more closely.
Returns
-------
dataBoundryHs : nparray
The Hs values of the boundary observations
dataBoundryT : nparray
The T values of the boundary observations
Example
-------
To get the corresponding data contour:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Rosenblatt EA object for buoy
rosen46022 = ESSC.Rosenblatt(buoy46022)
# Calculate the data contour
dataHs, dataT = rosen46022.dataContour(tStepSize = 1, hsStepSize = .5)
'''
maxHs = max(self.buoy.Hs)
minHs = min(self.buoy.Hs)
sortedHsBuoy = copy.deepcopy(self.buoy)
sortedTBuoy = copy.deepcopy(self.buoy)
sortedTIndex = sorted(range(len(self.buoy.T)),key=lambda x:self.buoy.T[x])
sortedHsIndex = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
sortedHsBuoy.Hs = self.buoy.Hs[sortedHsIndex]
sortedHsBuoy.T = self.buoy.T[sortedHsIndex]
sortedTBuoy.Hs = self.buoy.Hs[sortedTIndex]
sortedTBuoy.T = self.buoy.T[sortedTIndex]
hsBin1 = []
hsBin2 = []
hsBin3 = []
hsBin4 = []
tBin1 = []
tBin2 = []
tBin3 = []
tBin4 = []
startingPoint = sortedTBuoy.T[0]
hsBin4.append(sortedTBuoy.Hs[0])
tBin4.append(sortedTBuoy.T[0])
while True:
tempNextBinTs = sortedTBuoy.T[sortedTBuoy.T < startingPoint + tStepSize]
tempNextBinHs = sortedTBuoy.Hs[sortedTBuoy.T < startingPoint + tStepSize]
nextBinTs = tempNextBinTs[tempNextBinTs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinTs > startingPoint]
try:
nextHs = max(nextBinHs)
nextT = nextBinTs[nextBinHs.argmax(axis=0)]
hsBin4.append(nextHs)
tBin4.append(nextT)
startingPoint = nextT
except ValueError:
startingPoint += tStepSize
break
if nextHs == maxHs:
break
startingPoint = sortedTBuoy.T[0]
hsBin1.append(sortedTBuoy.Hs[0])
tBin1.append(sortedTBuoy.T[0])
while True:
tempNextBinTs = sortedTBuoy.T[sortedTBuoy.T < startingPoint + tStepSize]
tempNextBinHs = sortedTBuoy.Hs[sortedTBuoy.T < startingPoint + tStepSize]
nextBinTs = tempNextBinTs[tempNextBinTs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinTs > startingPoint]
try:
nextHs = min(nextBinHs)
nextT = nextBinTs[nextBinHs.argmin(axis=0)]
hsBin1.append(nextHs)
tBin1.append(nextT)
startingPoint = nextT
except ValueError:
startingPoint += tStepSize
break
if nextHs == minHs:
break
startingPoint = sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)]
hsBin3.append(sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)])
tBin3.append(sortedHsBuoy.T[sortedHsBuoy.T.argmax(axis=0)])
while True:
tempNextBinTs = sortedHsBuoy.T[sortedHsBuoy.Hs < startingPoint + hsStepSize]
tempNextBinHs = sortedHsBuoy.Hs[sortedHsBuoy.Hs < startingPoint + hsStepSize]
nextBinTs = tempNextBinTs[tempNextBinHs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinHs > startingPoint]
try:
nextT = max(nextBinTs)
nextHs = nextBinHs[nextBinTs.argmax(axis=0)]
if nextHs not in hsBin4 and nextHs not in hsBin1:
hsBin3.append(nextHs)
tBin3.append(nextT)
startingPoint = nextHs
except ValueError:
startingPoint += hsStepSize
break
if nextHs == maxHs:
break
startingPoint = sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)]
while True:
tempNextBinTs = sortedHsBuoy.T[sortedHsBuoy.Hs > startingPoint - hsStepSize]
tempNextBinHs = sortedHsBuoy.Hs[sortedHsBuoy.Hs > startingPoint - hsStepSize]
nextBinTs = tempNextBinTs[tempNextBinHs < startingPoint]
nextBinHs = tempNextBinHs[tempNextBinHs < startingPoint]
try:
nextT = max(nextBinTs)
nextHs = nextBinHs[nextBinTs.argmax(axis=0)]
if nextHs not in hsBin1 and nextHs not in hsBin4:
hsBin2.append(nextHs)
tBin2.append(nextT)
startingPoint = nextHs
except ValueError:
startingPoint = startingPoint - hsStepSize
break
if nextHs == minHs:
break
hsBin2 = hsBin2[::-1] # Reverses the order of the array
tBin2 = tBin2[::-1]
hsBin4 = hsBin4[::-1] # Reverses the order of the array
tBin4 = tBin4[::-1]
dataBoundryHs = np.concatenate((hsBin1,hsBin2,hsBin3,hsBin4),axis = 0)
dataBoundryT = np.concatenate((tBin1,tBin2,tBin3,tBin4),axis = 0)
dataBoundryHs = dataBoundryHs[::-1]
dataBoundryT = dataBoundryT[::-1]
return(dataBoundryHs, dataBoundryT)
def __getCopulaParams(self,n_size,bin_1_limit,bin_step):
sorted_idx = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
Hs = self.buoy.Hs[sorted_idx]
T = self.buoy.T[sorted_idx]
# Estimate parameters for Weibull distribution for component 1 (Hs) using MLE
# Estimate parameters for Lognormal distribution for component 2 (T) using MLE
para_dist_1=stats.exponweib.fit(Hs,floc=0,fa=1)
para_dist_2=stats.norm.fit(np.log(T))
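# Note: with fa=1 scipy's exponentiated Weibull reduces to a standard
# two-parameter Weibull (floc=0 pins the location at zero), and fitting
# a normal distribution to log(T) is equivalent to a lognormal fit for T.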
# Binning
ind = np.array([])
ind = np.append(ind,sum(Hs_val <= bin_1_limit for Hs_val in Hs))
# Make sure first bin isn't empty or too small to avoid errors
while ind == 0 or ind < n_size:
ind = np.array([])
bin_1_limit = bin_1_limit + bin_step
ind = np.append(ind,sum(Hs_val <= bin_1_limit for Hs_val in Hs))
for i in range(1,200):
bin_i_limit = bin_1_limit+bin_step*(i)
ind = np.append(ind,sum(Hs_val <= bin_i_limit for Hs_val in Hs))
if (ind[i]-ind[i-1]) < n_size:
break
# Parameters for conditional distribution of T|Hs for each bin
num=len(ind) # num+1: number of bins
para_dist_cond = []
hss = []
para_dist_cond.append(stats.norm.fit(np.log(T[range(0,int(ind[0]))]))) # parameters for first bin
hss.append(np.mean(Hs[range(0,int(ind[0])-1)])) # mean of Hs (component 1 for first bin)
para_dist_cond.append(stats.norm.fit(np.log(T[range(0,int(ind[1]))]))) # parameters for second bin
hss.append(np.mean(Hs[range(0,int(ind[1])-1)])) # mean of Hs (component 1 for second bin)
for i in range(2,num):
para_dist_cond.append(stats.norm.fit(np.log(T[range(int(ind[i-2]),int(ind[i]))])))
hss.append(np.mean(Hs[range(int(ind[i-2]),int(ind[i]))]))
para_dist_cond.append(stats.norm.fit(np.log(T[range(int(ind[num-2]),int(len(Hs)))]))) # parameters for last bin
hss.append(np.mean(Hs[range(int(ind[num-2]),int(len(Hs)))])) # mean of Hs (component 1 for last bin)
para_dist_cond = np.array(para_dist_cond)
hss = np.array(hss)
phi_mean = np.column_stack((np.ones(num+1),hss[:],hss[:]**2,hss[:]**3))
phi_std = np.column_stack((np.ones(num+1),hss[:],hss[:]**2))
# Estimate coefficients of mean of Ln(T|Hs)(vector 4x1) (cubic in Hs)
mean_cond = np.linalg.lstsq(phi_mean,para_dist_cond[:,0])[0]
# Estimate coefficients of standard deviation of Ln(T|Hs) (vector 3x1) (quadratic in Hs)
std_cond = np.linalg.lstsq(phi_std,para_dist_cond[:,1])[0]
return para_dist_1, para_dist_2, mean_cond, std_cond
def __getNonParaCopulaParams(self,Ndata, max_T, max_Hs):
sorted_idx = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
Hs = self.buoy.Hs[sorted_idx]
T = self.buoy.T[sorted_idx]
# Calculate KDE bounds (this may be added as an input later)
min_limit_1 = 0
max_limit_1 = max_Hs
min_limit_2 = 0
max_limit_2 = max_T
# Discretize for KDE
pts_hs = np.linspace(min_limit_1, max_limit_1, self.Ndata)
pts_t = np.linspace(min_limit_2, max_limit_2, self.Ndata)
# Calculate optimal bandwidth for T and Hs
sig = robust.scale.mad(T)
num = float(len(T))
bwT = sig*(4.0/(3.0*num))**(1.0/5.0)
sig = robust.scale.mad(Hs)
num = float(len(Hs))
bwHs = sig*(4.0/(3.0*num))**(1.0/5.0)
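# This is Silverman's rule-of-thumb bandwidth for a Gaussian kernel,
# bw = sigma*(4/(3n))^(1/5), with the median absolute deviation (MAD)
# used as a robust estimate of the scale sigma.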
# Nonparametric PDF for T
temp = sm.nonparametric.KDEUnivariate(T)
temp.fit(bw = bwT)
f_t = temp.evaluate(pts_t)
# Nonparametric CDF for Hs
temp = sm.nonparametric.KDEUnivariate(Hs)
temp.fit(bw = bwHs)
tempPDF = temp.evaluate(pts_hs)
F_hs = tempPDF/sum(tempPDF)
F_hs = np.cumsum(F_hs)
# Nonparametric CDF for T
F_t = f_t/sum(f_t)
F_t = np.cumsum(F_t)
nonpara_dist_1 = np.transpose(np.array([pts_hs, F_hs]))
nonpara_dist_2 = np.transpose(np.array([pts_t, F_t]))
nonpara_pdf_2 = np.transpose(np.array([pts_t, f_t]))
return nonpara_dist_1, nonpara_dist_2, nonpara_pdf_2
def __gumbelCopula(self, u, alpha):
''' Calculates the Gumbel copula density
Parameters
----------
u: np.array
2xN array of uniform marginal (CDF) values at which to
evaluate the copula density.
alpha: float
Copula parameter. Must be greater than or equal to 1.
Returns
-------
y: np.array
Copula density function.
'''
#Ignore divide by 0 warnings and resulting NaN warnings
np.seterr(all='ignore')
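# The Gumbel copula is C(u1,u2) = exp(-((-ln u1)^a + (-ln u2)^a)^(1/a)),
# so nlogC below is -ln C(u1,u2); sorting v = -ln(u) and factoring out
# vmax improves numerical stability when evaluating the density.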
v = -np.log(u)
v = np.sort(v, axis=0)
vmin = v[0, :]
vmax = v[1, :]
nlogC = vmax * (1 + (vmin / vmax) ** alpha) ** (1 / alpha)
y = (alpha - 1 +nlogC)*np.exp(-nlogC+np.sum((alpha-1)*np.log(v)+v, axis =0) +(1-2*alpha)*np.log(nlogC))
np.seterr(all='warn')
return(y)
class PCA(EA):
def __init__(self, buoy, size_bin=250.):
'''
Create a PCA EA class for a buoy object. Contours generated under this
class will use principal component analysis (PCA) with improved
distribution fitting (Eckert et al., 2015) and the I-FORM.
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
size_bin : float
chosen bin size
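Example
-------
A minimal construction sketch, mirroring the buoy setup used in the
method examples below::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)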
'''
self.method = "Principle component analysis"
self.buoy = buoy
if size_bin > len(buoy.Hs)*0.25:
self.size_bin = len(buoy.Hs)*0.25
print(round(len(buoy.Hs)*0.25,2),'is the max bin size for this buoy. The bin size has been set to this amount.')
else:
self.size_bin = size_bin
self.Hs_ReturnContours = None
self.Hs_SampleCA = None
self.Hs_SampleFSS = None
self.T_ReturnContours = None
self.T_SampleCA = None
self.T_SampleFSS = None
self.Weight_points = None
self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(self.size_bin)
def __generateParams(self, size_bin=250.0):
pca = skPCA(n_components=2)
pca.fit(np.array((self.buoy.Hs - self.buoy.Hs.mean(axis=0), self.buoy.T - self.buoy.T.mean(axis=0))).T)
coeff = abs(pca.components_) # Apply correct/expected sign convention
coeff[1, 1] = -1.0 * coeff[1, 1] # Apply correct/expected sign convention
Comp1_Comp2 = np.dot (np.array((self.buoy.Hs, self.buoy.T)).T, coeff)
shift = abs(min(Comp1_Comp2[:, 1])) + 0.1 # Calculate shift
# Apply shift to Component 2 to make all values positive
Comp1_Comp2[:, 1] = Comp1_Comp2[:, 1] + shift
Comp1_Comp2_sort = Comp1_Comp2[Comp1_Comp2[:, 0].argsort(), :]
# Fitting distribution of component 1
comp1_params = stats.invgauss.fit(Comp1_Comp2_sort[:, 0], floc=0)
n_data = len(self.buoy.Hs) # Number of observations
edges = np.hstack((np.arange(0, size_bin * np.ceil(n_data / size_bin),
size_bin), n_data + 1))
ranks = np.arange(n_data)
hist_count, _ = np.histogram(ranks, bins=edges)
bin_inds = np.digitize(ranks, bins=edges) - 1
Comp2_bins_params = np.zeros((2, int(max(bin_inds) + 1)))
Comp1_mean = np.array([])
for bin_loop in range(np.max(bin_inds) + 1):
mask_bins = bin_inds == bin_loop # Find location of bin values
Comp2_bin = np.sort(Comp1_Comp2_sort[mask_bins, 1])
Comp1_mean = np.append(Comp1_mean,
np.mean(Comp1_Comp2_sort[mask_bins, 0]))
# Calcualte normal distribution parameters for C2 in each bin
Comp2_bins_params[:, bin_loop] = np.array(stats.norm.fit(Comp2_bin))
mu_param, pcov = optim.curve_fit(self.__mu_fcn,
Comp1_mean.T, Comp2_bins_params[0, :])
sigma_param = self.__sigma_fits(Comp1_mean, Comp2_bins_params[1, :])
return coeff, shift, comp1_params, sigma_param, mu_param
def _saveParams(self, groupObj):
if('nb_steps' in groupObj):
groupObj['nb_steps'][...] = self.nb_steps
else:
groupObj.create_dataset('nb_steps', data=self.nb_steps)
if('time_r' in groupObj):
groupObj['time_r'][...] = self.time_r
else:
groupObj.create_dataset('time_r', data=self.time_r)
if('time_ss' in groupObj):
groupObj['time_ss'][...] = self.time_ss
else:
groupObj.create_dataset('time_ss', data=self.time_ss)
if('coeff' in groupObj):
groupObj['coeff'][...] = self.coeff
else:
groupObj.create_dataset('coeff', data=self.coeff)
if('shift' in groupObj):
groupObj['shift'][...] = self.shift
else:
groupObj.create_dataset('shift', data=self.shift)
if('comp1_params' in groupObj):
groupObj['comp1_params'][...] = self.comp1_params
else:
groupObj.create_dataset('comp1_params', data=self.comp1_params)
if('sigma_param' in groupObj):
groupObj['sigma_param'][...] = self.sigma_param
else:
groupObj.create_dataset('sigma_param', data=self.sigma_param)
if('mu_param' in groupObj):
groupObj['mu_param'][...] = self.mu_param
else:
groupObj.create_dataset('mu_param', data=self.mu_param)
def getContours(self, time_ss, time_r, nb_steps=1000):
'''WDRT Extreme Sea State PCA Contour function
This function calculates environmental contours of extreme sea states using
principal component analysis and the inverse first-order reliability
method.
Parameters
----------
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Contour generation example
Hs_Return, T_Return = pca46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
# IFORM
# Failure probability for the desired return period (time_R) given the
# duration of the measurements (time_ss)
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
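# e.g. 1-hour sea states (time_ss=1) and a 100-year return period
# (time_r=100) give p_f = 1/(365*24*100) ~= 1.14e-6 and beta ~= 4.73.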
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
# Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
# Calculate C1 values along the contour
Comp1_R = stats.invgauss.ppf(stats.norm.cdf(U1, loc=0, scale=1),
mu= self.comp1_params[0], loc=0,
scale= self.comp1_params[2])
# Calculate mu values at each point on the circle
mu_R = self.__mu_fcn(Comp1_R, self.mu_param[0], self.mu_param[1])
# Calculate sigma values at each point on the circle
sigma_R = self.__sigma_fcn(self.sigma_param, Comp1_R)
# Use calculated mu and sigma values to calculate C2 along the contour
Comp2_R = stats.norm.ppf(stats.norm.cdf(U2, loc=0, scale=1),
loc=mu_R, scale=sigma_R)
# Calculate Hs and T along the contour
Hs_Return, T_Return = self.__princomp_inv(Comp1_R, Comp2_R, self.coeff, self.shift)
Hs_Return = np.maximum(0, Hs_Return) # Remove negative values
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self, num_contour_points, contour_returns, random_seed=None):
'''WDRT Extreme Sea State Contour Sampling function.
This function calculates samples of Hs and T using the EA function to
sample between contours of user-defined return periods.
Parameters
----------
num_contour_points : int
Number of sample points to be calculated per contour interval.
contour_returns: np.array
Vector of return periods that define the contour intervals in
which samples will be taken. Values must be greater than zero and
must be in increasing order.
random_seed: int (optional)
Random seed for sample generation, required for sample
repeatability. If left blank, a seed will automatically be
generated.
Returns
-------
Hs_Samples: np.array
Vector of Hs values for each sample point.
Te_Samples: np.array
Vector of Te values for each sample point.
Weight_points: np.array
Vector of probabilistic weights for each sampling point
to be used in risk calculations.
Example
-------
To get weighted samples from a set of contours::
import numpy as np
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
num_contour_points = 10 # Number of points to be sampled for each contour interval
# Return periods defining the sampling contour bounds
contour_returns = np.array([0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100])
# Calculate contour to save required variables to PCA EA object
pca46022.getContours(Time_SS, Time_r)
random_seed = 2 # Random seed for sample generation
# Get samples for a full sea state long term analysis
Hs_sampleFSS, T_sampleFSS, Weight_sampleFSS = pca46022.getSamples(num_contour_points,
contour_returns, random_seed)
'''
# Calculate line where Hs = 0 to avoid sampling Hs in negative space
Te_zeroline = np.linspace(2.5, 30, 1000)
Te_zeroline = np.transpose(Te_zeroline)
Hs_zeroline = np.zeros(len(Te_zeroline))
# Transform zero line into principal component space
Comp_zeroline = np.dot(np.transpose(np.vstack([Hs_zeroline, Te_zeroline])),
self.coeff)
Comp_zeroline[:, 1] = Comp_zeroline[:, 1] + self.shift
# Find quantiles along zero line
C1_zeroline_prob = stats.invgauss.cdf(Comp_zeroline[:, 0],
mu = self.comp1_params[0], loc=0,
scale = self.comp1_params[2])
mu_zeroline = self.__mu_fcn(Comp_zeroline[:, 0], self.mu_param[0], self.mu_param[1])
sigma_zeroline = self.__sigma_fcn(self.sigma_param, Comp_zeroline[:, 0])
C2_zeroline_prob = stats.norm.cdf(Comp_zeroline[:, 1],
loc=mu_zeroline, scale=sigma_zeroline)
C1_normzeroline = stats.norm.ppf(C1_zeroline_prob, 0, 1)
C2_normzeroline = stats.norm.ppf(C2_zeroline_prob, 0, 1)
contour_probs = 1 / (365 * (24 / self.time_ss) * contour_returns)
# Reliability contour generation
beta_lines = stats.norm.ppf(
(1 - contour_probs), 0, 1) # Calculate reliability
beta_lines = np.hstack((0, beta_lines)) # Add zero as lower bound to first
# contour
theta_lines = np.linspace(0, 2 * np.pi, 1000) # Discretize the circle
contour_probs = np.hstack((1, contour_probs)) # Add probability of 1 to the
# reliability set, corresponding to probability of the center point of the
# normal space
# Vary U1,U2 along circle sqrt(U1^2+U2^2) = beta
U1_lines = np.dot(np.cos(theta_lines[:, None]), beta_lines[None, :])
U2_lines = np.dot(np.sin(theta_lines[:, None]), beta_lines[None, :])
# Removing values on the H_s = 0 line that are far from the circles in the
# normal space that will be evaluated to speed up calculations
minval = np.amin(U1_lines) - 0.5
mask = C1_normzeroline > minval
C1_normzeroline = C1_normzeroline[mask]
C2_normzeroline = C2_normzeroline[mask]
# Transform to polar coordinates
Theta_zeroline = np.arctan2(C2_normzeroline, C1_normzeroline)
Rho_zeroline = np.sqrt(C1_normzeroline**2 + C2_normzeroline**2)
Theta_zeroline[Theta_zeroline < 0] = Theta_zeroline[
Theta_zeroline < 0] + 2 * np.pi
Sample_alpha, Sample_beta, Weight_points = self.__generateData(beta_lines,
Rho_zeroline, Theta_zeroline, num_contour_points,contour_probs,random_seed)
Hs_Sample, T_Sample = self.__transformSamples(Sample_alpha, Sample_beta)
self.Hs_SampleFSS = Hs_Sample
self.T_SampleFSS = T_Sample
self.Weight_SampleFSS = Weight_points
return Hs_Sample, T_Sample, Weight_points
def plotSampleData(self):
"""
Display a plot of the 100-year return contour, full sea state samples
and contour samples
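Example
-------
A usage sketch (assuming getContours and getSamples have already been
called on this PCA object, so the contour and samples exist)::
pca46022.plotSampleData()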
"""
plt.figure()
plt.plot(self.buoy.T, self.buoy.Hs, 'bo', alpha=0.1, label='NDBC data')
plt.plot(self.T_ReturnContours, self.Hs_ReturnContours, 'k-', label='100 year contour')
plt.plot(self.T_SampleFSS, self.Hs_SampleFSS, 'ro', label='full sea state samples')
plt.plot(self.T_SampleCA, self.Hs_SampleCA, 'y^', label='contour approach samples')
plt.legend(loc='lower right', fontsize='small')
plt.grid(True)
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.show()
def __generateData(self, beta_lines, Rho_zeroline, Theta_zeroline, num_contour_points, contour_probs, random_seed):
"""
Calculates radius, angle, and weight for each sample point
"""
''' Data generating function that calculates the radius, angle, and
weight for each sample point.
Parameters
----------
beta_lines: np.array
Array of mu fitting function parameters.
Rho_zeroline: np.array
array of radii
Theta_zeroline: np.array
num_contour_points: np.array
contour_probs: np.array
random_seed: int
seed for generating random data.
Returns
-------
Sample_alpha: np.array
Array of fitted sample angle values.
Sample_beta: np.array
Array of fitted sample radius values.
Weight_points: np.array
Array of weights for each point.
'''
np.random.seed(random_seed)
num_samples = (len(beta_lines) - 1) * num_contour_points
Alpha_bounds = np.zeros((len(beta_lines) - 1, 2))
Angular_dist = np.zeros(len(beta_lines) - 1)
Angular_ratio = np.zeros(len(beta_lines) - 1)
Alpha = np.zeros((len(beta_lines) - 1, num_contour_points + 1))
Weight = np.zeros(len(beta_lines) - 1)
Sample_beta = np.zeros(num_samples)
Sample_alpha = np.zeros(num_samples)
Weight_points = np.zeros(num_samples)
for i in range(len(beta_lines) - 1): # Loop over contour intervals
# Check if any of the radii for the
r = Rho_zeroline - beta_lines[i + 1]
# Hs=0, line are smaller than the radii of the contour, meaning
# that these lines intersect
if any(r < 0):
left = np.amin(np.where(r < -0.01))
right = np.amax(np.where(r < -0.01))
Alpha_bounds[i, :] = (Theta_zeroline[left], Theta_zeroline[right] -
2 * np.pi) # Save sampling bounds
else:
Alpha_bounds[i, :] = np.array((0, 2 * np.pi))
# Find the angular distance that will be covered by sampling the disc
Angular_dist[i] = sum(abs(Alpha_bounds[i]))
# Calculate ratio of area covered for each contour
Angular_ratio[i] = Angular_dist[i] / (2 * np.pi)
# Discretize the remaining portion of the disc into 10 equally spaced
# areas to be sampled
Alpha[i, :] = np.arange(min(Alpha_bounds[i]),
max(Alpha_bounds[i]) + 0.1, Angular_dist[i] / num_contour_points)
# Calculate the weight of each point sampled per contour
Weight[i] = ((contour_probs[i] - contour_probs[i + 1]) *
Angular_ratio[i] / num_contour_points)
for j in range(num_contour_points):
# Generate sample radius by adding a randomly sampled distance to
# the 'disc' lower bound
Sample_beta[(i) * num_contour_points + j] = (beta_lines[i] +
np.random.random_sample() * (beta_lines[i + 1] - beta_lines[i]))
# Generate sample angle by adding a randomly sampled distance to
# the lower bound of the angle defining a discrete portion of the
# 'disc'
Sample_alpha[(i) * num_contour_points + j] = (Alpha[i, j] +
np.random.random_sample() * (Alpha[i, j + 1] - Alpha[i, j]))
# Save the weight for each sample point
Weight_points[(i) * num_contour_points + j] = Weight[i]
return Sample_alpha, Sample_beta, Weight_points
def __transformSamples(self, Sample_alpha, Sample_beta):
Sample_U1 = Sample_beta * np.cos(Sample_alpha)
Sample_U2 = Sample_beta * np.sin(Sample_alpha)
# Sample transformation to principal component space
Comp1_sample = stats.invgauss.ppf(stats.norm.cdf(Sample_U1, loc=0, scale=1),
mu=self.comp1_params[0], loc=0,
scale=self.comp1_params[2])
mu_sample = self.__mu_fcn(Comp1_sample, self.mu_param[0], self.mu_param[1])
# Calculate sigma values at each point on the circle
sigma_sample = self.__sigma_fcn(self.sigma_param, Comp1_sample)
# Use calculated mu and sigma values to calculate C2 along the contour
Comp2_sample = stats.norm.ppf(stats.norm.cdf(Sample_U2, loc=0, scale=1),
loc=mu_sample, scale=sigma_sample)
# Sample transformation into Hs-T space
Hs_Sample, T_Sample = self.__princomp_inv(
Comp1_sample, Comp2_sample, self.coeff, self.shift)
return Hs_Sample, T_Sample
def __mu_fcn(self, x, mu_p_1, mu_p_2):
''' Linear fitting function for the mean(mu) of Component 2 normal
distribution as a function of the Component 1 mean for each bin.
Used in the EA and getSamples functions.
Parameters
----------
mu_p_1, mu_p_2: float
Slope and intercept parameters of the linear mu fitting function.
x: np.array
Array of values (Component 1 mean for each bin) at which to evaluate
the mu fitting function.
Returns
-------
mu_fit: np.array
Array of fitted mu values.
'''
mu_fit = mu_p_1 * x + mu_p_2
return mu_fit
def __sigma_fcn(self,sig_p, x):
'''Quadratic fitting formula for the standard deviation(sigma) of Component
2 normal distribution as a function of the Component 1 mean for each bin.
Used in the EA and getSamples functions.
Parameters
----------
sig_p: np.array
Array of sigma fitting function parameters.
x: np.array
Array of values (Component 1 mean for each bin) at which to evaluate
the sigma fitting function.
Returns
-------
sigma_fit: np.array
Array of fitted sigma values.
'''
sigma_fit = sig_p[0] * x**2 + sig_p[1] * x + sig_p[2]
return sigma_fit
def __princomp_inv(self, princip_data1, princip_data2, coeff, shift):
'''Takes the inverse of the principal component rotation given data,
coefficients, and shift. Used in the EA and getSamples functions.
Parameters
----------
princip_data1: np.array
Array of Component 1 values.
princip_data2: np.array
Array of Component 2 values.
coeff: np.array
Array of principal component coefficients.
shift: float
Shift applied to Component 2 to make all values positive.
Returns
-------
original1: np.array
Hs values following rotation from principal component space.
original2: np.array
T values following rotation from principal component space.
'''
original1 = np.zeros(len(princip_data1))
original2 = np.zeros(len(princip_data1))
for i in range(len(princip_data2)):
original1[i] = (((coeff[0, 1] * (princip_data2[i] - shift)) +
(coeff[0, 0] * princip_data1[i])) / (coeff[0, 1]**2 +
coeff[0, 0]**2))
original2[i] = (((coeff[0, 1] * princip_data1[i]) -
(coeff[0, 0] * (princip_data2[i] -
shift))) / (coeff[0, 1]**2 + coeff[0, 0]**2))
return original1, original2
def __betafcn(self, sig_p, rho):
'''Penalty calculation for sigma parameter fitting function to impose
positive value constraint.
Parameters
----------
sig_p: np.array
Array of sigma fitting function parameters.
rho: float
Penalty function variable that drives the solution towards
required constraint.
Returns
-------
Beta1: float
Penalty function variable that applies the constraint requiring
the y-intercept of the sigma fitting function to be greater than
or equal to 0.
Beta2: float
Penalty function variable that applies the constraint requiring
the minimum of the sigma fitting function to be greater than or
equal to 0.
'''
if -sig_p[2] <= 0:
Beta1 = 0.0
else:
Beta1 = rho
if -sig_p[2] + (sig_p[1]**2) / (4 * sig_p[0]) <= 0:
Beta2 = 0.0
else:
Beta2 = rho
return Beta1, Beta2
# Note: the penalty-optimization routines below reuse __sigma_fcn defined above.
def __objfun(self, sig_p, x, y_actual):
'''Sum of least square error objective function used in sigma
minimization.
Parameters
----------
sig_p: np.array
Array of sigma fitting function parameters.
x: np.array
Array of values (Component 1 mean for each bin) at which to evaluate
the sigma fitting function.
y_actual: np.array
Array of actual sigma values for each bin to use in least
square error calculation with fitted values.
Returns
-------
obj_fun_result: float
Sum of least square error objective function for fitted
and actual values.
'''
obj_fun_result = np.sum((self.__sigma_fcn(sig_p, x) - y_actual)**2)
return obj_fun_result # Sum of least square error
def __objfun_penalty(self, sig_p, x, y_actual, Beta1, Beta2):
'''Penalty function used for sigma function constrained optimization.
Parameters
----------
sig_p: np.array
Array of sigma fitting function parameters.
x: np.array
Array of values (Component 1 mean for each bin) at which to evaluate
the sigma fitting function.
y_actual: np.array
Array of actual sigma values for each bin to use in least
square error calculation with fitted values.
Beta1: float
Penalty function variable that applies the constraint requiring
the y-intercept of the sigma fitting function to be greater than
or equal to 0.
Beta2: float
Penalty function variable that applies the constraint requiring
the minimum of the sigma fitting function to be greater than or
equal to 0.
Returns
-------
penalty_fcn: float
Objective function result with constraint penalties
applied for out of bound solutions.
'''
penalty_fcn = (self.__objfun(sig_p, x, y_actual) + Beta1 * (-sig_p[2])**2 +
Beta2 * (-sig_p[2] + (sig_p[1]**2) / (4 * sig_p[0]))**2)
return penalty_fcn
def __sigma_fits(self, Comp1_mean, sigma_vals):
'''Sigma parameter fitting function using penalty optimization.
Parameters
----------
Comp1_mean: np.array
Mean value of Component 1 for each bin of Component 2.
sigma_vals: np.array
Value of Component 2 sigma for each bin derived from normal
distribution fit.
Returns
-------
sig_final: np.array
Final sigma parameter values after constrained optimization.
'''
sig_0 = np.array((0.1, 0.1, 0.1)) # Set initial guess
rho = 1.0 # Set initial penalty value
# Set tolerance, very small values (i.e.,smaller than 10^-5) may cause
# instabilities
epsilon = 10**-5
# Set inital beta values using beta function
Beta1, Beta2 = self.__betafcn(sig_0, rho)
# Initial search for minimum value using initial guess
sig_1 = optim.fmin(func=self.__objfun_penalty, x0=sig_0,
args=(Comp1_mean, sigma_vals, Beta1, Beta2), disp=False)
# While either the difference between iterations or the difference in
# objective function evaluation is greater than the tolerance, continue
# iterating
while (np.amin(abs(sig_1 - sig_0)) > epsilon and
abs(self.__objfun(sig_1, Comp1_mean, sigma_vals) -
self.__objfun(sig_0, Comp1_mean, sigma_vals)) > epsilon):
sig_0 = sig_1
# Calculate penalties for this iteration
Beta1, Beta2 = self.__betafcn(sig_0, rho)
# Find a new minimum
sig_1 = optim.fmin(func=self.__objfun_penalty, x0=sig_0,
args=(Comp1_mean, sigma_vals, Beta1, Beta2), disp=False)
rho = 10 * rho # Increase penalization
sig_final = sig_1
return sig_final
class GaussianCopula(EA):
'''Create a GaussianCopula EA class for a buoy object. Contours generated
under this class will use a Gaussian copula.'''
def __init__(self, buoy, n_size=40., bin_1_limit=1., bin_step=0.25):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
n_size: float
minimum bin size used for Copula contour methods
bin_1_limit: float
maximum value of Hs for the first bin
bin_step: float
overlap interval for each bin
'''
self.method = "Gaussian Copula"
self.buoy = buoy
self.n_size = n_size
self.bin_1_limit = bin_1_limit
self.bin_step = bin_step
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
self.para_dist_1,self.para_dist_2,self.mean_cond,self.std_cond = self._EA__getCopulaParams(n_size,bin_1_limit,bin_step)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State Gaussian Copula Contour function.
This function calculates environmental contours of extreme sea states using
a Gaussian copula and the inverse first-order reliability
method.
Parameters
----------
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
Gauss46022 = ESSC.GaussianCopula(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Gaussian copula contour generation example
Hs_Return, T_Return = Gauss46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
# Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
comp_1 = stats.exponweib.ppf(stats.norm.cdf(U1),a=self.para_dist_1[0],c=self.para_dist_1[1],loc=self.para_dist_1[2],scale=self.para_dist_1[3])
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0] # Calculate Kendall's tau
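# For elliptical copulas Kendall's tau and the linear correlation are
# related by tau = (2/pi)*arcsin(rho), so rho = sin(pi*tau/2) below
# inverts the sample tau into the Gaussian copula parameter.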
rho_gau=np.sin(tau*np.pi/2.)
z2_Gau = stats.norm.cdf(U2*np.sqrt(1.-rho_gau**2.) + rho_gau*U1)
comp_2_Gaussian = stats.lognorm.ppf(z2_Gau,s=self.para_dist_2[1],loc=0,scale=np.exp(self.para_dist_2[0])) #lognormalinverse
Hs_Return = comp_1
T_Return = comp_2_Gaussian
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version.'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('n_size', data=self.n_size)
groupObj.create_dataset('bin_1_limit', data=self.bin_1_limit)
groupObj.create_dataset('bin_step', data=self.bin_step)
groupObj.create_dataset('para_dist_1', data=self.para_dist_1)
groupObj.create_dataset('para_dist_2', data=self.para_dist_2)
groupObj.create_dataset('mean_cond', data=self.mean_cond)
groupObj.create_dataset('std_cond', data=self.std_cond)
class Rosenblatt(EA):
'''Create a Rosenblatt EA class for a buoy object. Contours generated
under this class will use a Rosenblatt transformation and the I-FORM.'''
def __init__(self, buoy, n_size=50., bin_1_limit= .5, bin_step=0.25):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
n_size: float
minimum bin size used for Copula contour methods
bin_1_limit: float
maximum value of Hs for the first bin
bin_step: float
overlap interval for each bin
'''
self.method = "Rosenblatt"
self.buoy = buoy
if n_size > 100:
self.n_size = 100
print(100,'is the maximum "minimum bin size" for this buoy. The minimum bin size has been set to this amount.')
else:
self.n_size = n_size
if bin_step > max(buoy.Hs)*.1:
self.bin_step = max(buoy.Hs)*.1
print(round(max(buoy.Hs)*.1,2),'is the maximum bin overlap for this buoy. The bin overlap has been set to this amount.')
else:
self.bin_step = bin_step
if bin_1_limit > max(buoy.Hs)*.25:
self.bin_1_limit = max(buoy.Hs)*.25
print(round(max(buoy.Hs)*.25,2),'is the maximum limit for the first bin for this buoy. The first bin limit has been set to this amount.')
else:
self.bin_1_limit = bin_1_limit
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
self.para_dist_1,self.para_dist_2,self.mean_cond,self.std_cond = self._EA__getCopulaParams(self.n_size,self.bin_1_limit,self.bin_step)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State Rosenblatt Contour function.
This function calculates environmental contours of extreme sea states using
a Rosenblatt transformation and the inverse first-order reliability
method.
Parameters
----------
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
Rosen46022 = ESSC.Rosenblatt(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Rosenblatt contour generation example
Hs_Return, T_Return = Rosen46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
# Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
comp_1 = stats.exponweib.ppf(stats.norm.cdf(U1),a=self.para_dist_1[0],c=self.para_dist_1[1],loc=self.para_dist_1[2],scale=self.para_dist_1[3])
lamda_cond=self.mean_cond[0]+self.mean_cond[1]*comp_1+self.mean_cond[2]*comp_1**2+self.mean_cond[3]*comp_1**3 # mean of Ln(T) as a function of Hs
sigma_cond=self.std_cond[0]+self.std_cond[1]*comp_1+self.std_cond[2]*comp_1**2 # Standard deviation of Ln(T) as a function of Hs
comp_2_Rosenblatt = stats.lognorm.ppf(stats.norm.cdf(U2),s=sigma_cond,loc=0,scale=np.exp(lamda_cond)) # lognormal inverse
Hs_Return = comp_1
T_Return = comp_2_Rosenblatt
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('n_size', data=self.n_size)
groupObj.create_dataset('bin_1_limit', data=self.bin_1_limit)
groupObj.create_dataset('bin_step', data=self.bin_step)
groupObj.create_dataset('para_dist_1', data=self.para_dist_1)
groupObj.create_dataset('para_dist_2', data=self.para_dist_2)
groupObj.create_dataset('mean_cond', data=self.mean_cond)
groupObj.create_dataset('std_cond', data=self.std_cond)
class ClaytonCopula(EA):
'''Create a ClaytonCopula EA class for a buoy object. Contours generated
under this class will use a Clayton copula.'''
def __init__(self, buoy, n_size=40., bin_1_limit=1., bin_step=0.25):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
n_size: float
minimum bin size used for Copula contour methods
bin_1_limit: float
maximum value of Hs for the first bin
bin_step: float
overlap interval for each bin
'''
self.method = "Clayton Copula"
self.buoy = buoy
self.n_size = n_size
self.bin_1_limit = bin_1_limit
self.bin_step = bin_step
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
self.para_dist_1,self.para_dist_2,self.mean_cond,self.std_cond = self._EA__getCopulaParams(n_size,bin_1_limit,bin_step)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State Clayton Copula Contour function.
This function calculates environmental contours of extreme sea states using
a Clayton copula and the inverse first-order reliability
method.
Parameters
----------
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
Clayton46022 = ESSC.ClaytonCopula(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Clayton copula contour generation example
Hs_Return, T_Return = Clayton46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
# Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
comp_1 = stats.exponweib.ppf(stats.norm.cdf(U1),a=self.para_dist_1[0],c=self.para_dist_1[1],loc=self.para_dist_1[2],scale=self.para_dist_1[3])
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0] # Calculate Kendall's tau
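# For the Clayton copula tau = theta/(theta + 2), so the sample tau is
# inverted below via theta = 2*tau/(1 - tau).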
theta_clay = (2.*tau)/(1.-tau)
z2_Clay=((1.-stats.norm.cdf(U1)**(-theta_clay)+stats.norm.cdf(U1)**(-theta_clay)/stats.norm.cdf(U2))**(theta_clay/(1.+theta_clay)))**(-1./theta_clay)
comp_2_Clayton = stats.lognorm.ppf(z2_Clay,s=self.para_dist_2[1],loc=0,scale=np.exp(self.para_dist_2[0])) #lognormalinverse
Hs_Return = comp_1
T_Return = comp_2_Clayton
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('n_size', data=self.n_size)
groupObj.create_dataset('bin_1_limit', data=self.bin_1_limit)
groupObj.create_dataset('bin_step', data=self.bin_step)
groupObj.create_dataset('para_dist_1', data=self.para_dist_1)
groupObj.create_dataset('para_dist_2', data=self.para_dist_2)
groupObj.create_dataset('mean_cond', data=self.mean_cond)
groupObj.create_dataset('std_cond', data=self.std_cond)
class GumbelCopula(EA):
'''Create a GumbelCopula EA class for a buoy object. Contours generated
under this class will use a Gumbel copula.'''
def __init__(self, buoy, n_size=40., bin_1_limit=1., bin_step=0.25,Ndata = 1000):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
n_size: float
minimum bin size used for Copula contour methods
bin_1_limit: float
maximum value of Hs for the first bin
bin_step: float
overlap interval for each bin
Ndata: int
discretization used in the Gumbel copula density estimation,
must be less than the number of contour points used in
getContours
'''
self.method = "Gumbel Copula"
self.buoy = buoy
self.n_size = n_size
self.bin_1_limit = bin_1_limit
self.bin_step = bin_step
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
self.Ndata = Ndata
self.min_limit_2 = 0.
self.max_limit_2 = np.ceil(np.amax(self.buoy.T)*2)
self.para_dist_1,self.para_dist_2,self.mean_cond,self.std_cond = self._EA__getCopulaParams(n_size,bin_1_limit,bin_step)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State Gumbel Copula Contour function
This function calculates environmental contours of extreme sea states using
a Gumbel copula and the inverse first-order reliability
method.
Parameters
___________
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
Gumbel46022 = ESSC.GumbelCopula(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Gumbel copula contour generation example
Hs_Return, T_Return = Gumbel46022.getContours(Time_SS,Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
# Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
comp_1 = stats.exponweib.ppf(stats.norm.cdf(U1),a=self.para_dist_1[0],c=self.para_dist_1[1],loc=self.para_dist_1[2],scale=self.para_dist_1[3])
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0] # Calculate Kendall's tau
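# For the Gumbel copula tau = 1 - 1/theta, so the sample tau is
# inverted below via theta = 1/(1 - tau).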
theta_gum = 1./(1.-tau)
fi_u1 = stats.norm.cdf(U1)
fi_u2 = stats.norm.cdf(U2)
x2 = np.linspace(self.min_limit_2,self.max_limit_2,self.Ndata)
z2 = stats.lognorm.cdf(x2,s=self.para_dist_2[1],loc=0,scale=np.exp(self.para_dist_2[0]))
comp_2_Gumb = np.zeros(nb_steps)
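# For each point on the contour circle: tabulate the conditional density
# f(T|Hs) = c(z1,z2)*f(T) on the x2 grid, integrate it numerically into
# a CDF, then invert that CDF at fi_u2[k] by table lookup.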
for k in range(0,int(nb_steps)):
z1 = np.linspace(fi_u1[k],fi_u1[k],self.Ndata)
Z = np.array((z1,z2))
Y = self._EA__gumbelCopula(Z, theta_gum) # Copula density function
Y =np.nan_to_num(Y)
p_x2_x1 = Y*(stats.lognorm.pdf(x2, s = self.para_dist_2[1], loc=0, scale = np.exp(self.para_dist_2[0]))) # pdf 2|1, f(comp_2|comp_1)=c(z1,z2)*f(comp_2)
dum = np.cumsum(p_x2_x1)
cdf = dum/(dum[self.Ndata-1]) # Estimate CDF from PDF
table = np.array((x2, cdf)) # Result of conditional CDF derived based on Gumbel copula
table = table.T
for j in range(self.Ndata):
if fi_u2[k] <= table[0,1]:
comp_2_Gumb[k] = min(table[:,0])
break
elif fi_u2[k] <= table[j,1]:
comp_2_Gumb[k] = (table[j,0]+table[j-1,0])/2
break
else:
comp_2_Gumb[k] = table[:,0].max()
Hs_Return = comp_1
T_Return = comp_2_Gumb
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version.'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('Ndata', data=self.Ndata)
groupObj.create_dataset('min_limit_2', data=self.min_limit_2)
groupObj.create_dataset('max_limit_2', data=self.max_limit_2)
groupObj.create_dataset('n_size', data=self.n_size)
groupObj.create_dataset('bin_1_limit', data=self.bin_1_limit)
groupObj.create_dataset('bin_step', data=self.bin_step)
groupObj.create_dataset('para_dist_1', data=self.para_dist_1)
groupObj.create_dataset('para_dist_2', data=self.para_dist_2)
groupObj.create_dataset('mean_cond', data=self.mean_cond)
groupObj.create_dataset('std_cond', data=self.std_cond)
class NonParaGaussianCopula(EA):
'''Create a NonParaGaussianCopula EA class for a buoy object. Contours
generated under this class will use a Gaussian copula with non-parametric
marginal distribution fits.'''
def __init__(self, buoy, Ndata = 1000, max_T=None, max_Hs=None):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
Ndata: int
discretization resolution used in KDE construction
max_T: float
Maximum T value for KDE construction, must include possible
range of contour. Default value is 2*max(T)
max_Hs: float
Maximum Hs value for KDE construction, must include possible
range of contour. Default value is 2*max(Hs)
'''
self.method = "Non-parametric Gaussian Copula"
self.buoy = buoy
self.Ndata = Ndata
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
if max_T is None:
max_T = max(self.buoy.T)*2.
if max_Hs is None:
max_Hs = max(self.buoy.Hs)*2.
self.max_T = max_T
self.max_Hs = max_Hs
self.nonpara_dist_1,self.nonpara_dist_2,self.nonpara_pdf_2 = self._EA__getNonParaCopulaParams(Ndata,max_T,max_Hs)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State Gaussian Copula Contour function.
This function calculates environmental contours of extreme sea states
using a Gaussian copula with non-parametric marginal distribution fits
and the inverse first-order reliability method.
Parameters
----------
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
NonParaGauss46022 = ESSC.NonParaGaussianCopula(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Non-Parametric Gaussian copula contour generation example
Hs_Return, T_Return = NonParaGauss46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
comp_1 = np.zeros(nb_steps)
comp_2_Gau = np.zeros(nb_steps)
# Inverse FORM
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
# Normal Space
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
# Copula parameters
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0]# Calculate Kendall's tau
rho_gau = np.sin(tau*np.pi/2.)
# Component 1 (Hs)
z1_Hs = stats.norm.cdf(U1)
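# Discrete inverse-CDF lookup: scan the tabulated nonparametric CDF for
# the first entry that reaches the target probability and take the
# midpoint of the bracketing Hs grid points.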
for k in range(0,nb_steps):
for j in range(0,np.size(self.nonpara_dist_1,0)):
if z1_Hs[k] <= self.nonpara_dist_1[0,1]:
comp_1[k] = min(self.nonpara_dist_1[:,0])
break
elif z1_Hs[k] <= self.nonpara_dist_1[j,1]:
comp_1[k] = (self.nonpara_dist_1[j,0] + self.nonpara_dist_1[j-1,0])/2
break
else:
comp_1[k]= max(self.nonpara_dist_1[:,0])
# Component 2 (T)
z2_Gau = stats.norm.cdf(U2*np.sqrt(1.-rho_gau**2.) + rho_gau*U1)
for k in range(0,nb_steps):
for j in range(0,np.size(self.nonpara_dist_2,0)):
if z2_Gau[k] <= self.nonpara_dist_2[0,1]:
comp_2_Gau[k] = min(self.nonpara_dist_2[:,0])
break
elif z2_Gau[k] <= self.nonpara_dist_2[j,1]:
comp_2_Gau[k] = (self.nonpara_dist_2[j,0] + self.nonpara_dist_2[j-1,0])/2
break
else:
comp_2_Gau[k]= max(self.nonpara_dist_2[:,0])
Hs_Return = comp_1
T_Return = comp_2_Gau
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version.'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('nonpara_dist_1', data=self.nonpara_dist_1)
groupObj.create_dataset('nonpara_dist_2', data=self.nonpara_dist_2)
class NonParaClaytonCopula(EA):
'''Create a NonParaClaytonCopula EA class for a buoy object. Contours
generated under this class will use a Clayton copula with non-parametric
marginal distribution fits.'''
def __init__(self, buoy, Ndata = 1000, max_T=None, max_Hs=None):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
Ndata: int
discretization resolution used in KDE construction
max_T: float
Maximum T value for KDE construction, must include possible
range of contour. Default value is 2*max(T)
max_Hs: float
Maximum Hs value for KDE construction, must include possible
range of contour. Default value is 2*max(Hs)
'''
self.method = "Non-parametric Clayton Copula"
self.buoy = buoy
self.Ndata = Ndata
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
if max_T is None:
max_T = max(self.buoy.T)*2.
if max_Hs is None:
max_Hs = max(self.buoy.Hs)*2.
self.max_T = max_T
self.max_Hs = max_Hs
self.nonpara_dist_1,self.nonpara_dist_2,self.nonpara_pdf_2 = self._EA__getNonParaCopulaParams(Ndata,max_T,max_Hs)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State non-parametric Clayton Copula Contour
function. This function calculates environmental contours of extreme
sea states using a Clayton copula with non-parametric marginal
distribution fits and the inverse first-order reliability method.
Parameters
----------
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
NonParaClayton46022 = ESSC.NonParaClaytonCopula(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Non-Parametric Clayton copula contour generation example
Hs_Return, T_Return = NonParaClayton46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
comp_1 = np.zeros(nb_steps)
comp_2_Clay = np.zeros(nb_steps)
# Inverse FORM
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
# Normal Space
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
# Copula parameters
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0]# Calculate Kendall's tau
theta_clay = (2.*tau)/(1.-tau)
# Component 1 (Hs)
z1_Hs = stats.norm.cdf(U1)
for k in range(0,nb_steps):
for j in range(0,np.size(self.nonpara_dist_1,0)):
if z1_Hs[k] <= self.nonpara_dist_1[0,1]:
comp_1[k] = min(self.nonpara_dist_1[:,0])
break
elif z1_Hs[k] <= self.nonpara_dist_1[j,1]:
comp_1[k] = (self.nonpara_dist_1[j,0] + self.nonpara_dist_1[j-1,0])/2
break
else:
comp_1[k]= max(self.nonpara_dist_1[:,0])
# Component 2 (T)
z2_Clay=((1.-stats.norm.cdf(U1)**(-theta_clay)+stats.norm.cdf(U1)**(-theta_clay)/stats.norm.cdf(U2))**(theta_clay/(1.+theta_clay)))**(-1./theta_clay)
for k in range(0,nb_steps):
for j in range(0,np.size(self.nonpara_dist_2,0)):
if z2_Clay[k] <= self.nonpara_dist_2[0,1]:
comp_2_Clay[k] = min(self.nonpara_dist_2[:,0])
break
elif z2_Clay[k] <= self.nonpara_dist_2[j,1]:
comp_2_Clay[k] = (self.nonpara_dist_2[j,0] + self.nonpara_dist_2[j-1,0])/2
break
else:
comp_2_Clay[k]= max(self.nonpara_dist_2[:,0])
Hs_Return = comp_1
T_Return = comp_2_Clay
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version.'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('nonpara_dist_1', data=self.nonpara_dist_1)
groupObj.create_dataset('nonpara_dist_2', data=self.nonpara_dist_2)
class NonParaGumbelCopula(EA):
'''Create a NonParaGumbelCopula EA class for a buoy object. Contours
generated under this class will use a Gumbel copula with non-parametric
marginal distribution fits.'''
def __init__(self, buoy, Ndata = 1000, max_T=None, max_Hs=None):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
NData: int
discretization resolution used in KDE construction
max_T:float
            Maximum T value for KDE construction, must include possible
range of contour. Default value is 2*max(T)
max_Hs:float
            Maximum Hs value for KDE construction, must include possible
range of contour. Default value is 2*max(Hs)
'''
self.method = "Non-parametric Gumbel Copula"
self.buoy = buoy
self.Ndata = Ndata
self.Hs_ReturnContours = None
# self.Hs_SampleCA = None
# self.Hs_SampleFSS = None
self.T_ReturnContours = None
# self.T_SampleCA = None
# self.T_SampleFSS = None
# self.Weight_points = None
# self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(size_bin)
if max_T == None:
max_T = max(self.buoy.T)*2.
if max_Hs == None:
max_Hs = max(self.buoy.Hs)*2.
self.max_T = max_T
self.max_Hs = max_Hs
self.nonpara_dist_1,self.nonpara_dist_2,self.nonpara_pdf_2 = self._EA__getNonParaCopulaParams(Ndata,max_T,max_Hs)
def getContours(self, time_ss, time_r, nb_steps = 1000):
        '''WDRT Extreme Sea State non-parametric Gumbel Copula Contour
function. This function calculates environmental contours of extreme
sea states using a Gumbel copula with non-parametric marginal
distribution fits and the inverse first-order reliability method.
Parameters
___________
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : float
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
nb_steps : float
Discretization of the circle in the normal space
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
            # Create Environmental Analysis object using above parameters
NonParaGumbel46022 = ESSC.NonParaGumbelCopula(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Non-Parametric Gumbel copula contour generation example
Hs_Return, T_Return = NonParaGumbel46022.getContours(Time_SS, Time_r,nb_steps)
'''
self.time_ss = time_ss
self.time_r = time_r
self.nb_steps = nb_steps
comp_1 = np.zeros(nb_steps)
comp_2_Gumb = np.zeros(nb_steps)
# Inverse FORM
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
# Normal Space
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
# Copula parameters
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0]# Calculate Kendall's tau
        theta_gum = 1. / (1. - tau)
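        # For the Gumbel copula theta = 1/(1 - tau); e.g., tau = 0.5 gives
        # theta_gum = 2, and theta_gum -> 1 (independence) as tau -> 0.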
# Component 1 (Hs)
z1_Hs = stats.norm.cdf(U1)
for k in range(0,nb_steps):
for j in range(0,np.size(self.nonpara_dist_1,0)):
if z1_Hs[k] <= self.nonpara_dist_1[0,1]:
comp_1[k] = min(self.nonpara_dist_1[:,0])
break
elif z1_Hs[k] <= self.nonpara_dist_1[j,1]:
comp_1[k] = (self.nonpara_dist_1[j,0] + self.nonpara_dist_1[j-1,0])/2
break
else:
comp_1[k]= max(self.nonpara_dist_1[:,0])
# Component 2 (T)
        fi_u1 = stats.norm.cdf(U1)
        fi_u2 = stats.norm.cdf(U2)
for k in range(0,nb_steps):
z1 = np.linspace(fi_u1[k],fi_u1[k],self.Ndata)
Z = np.array((np.transpose(z1),self.nonpara_dist_2[:,1]))
Y = self._EA__gumbelCopula(Z, theta_gum)
            Y = np.nan_to_num(Y)  # Need to look into this
p_x2_x1 = Y*self.nonpara_pdf_2[:,1]
dum = np.cumsum(p_x2_x1)
cdf = dum/(dum[self.Ndata-1])
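            # Conditional CDF of T given Hs, built numerically: the copula
            # density along u1 = fi_u1[k] is weighted by the marginal pdf of T,
            # accumulated with a cumulative sum, and normalized to end at 1.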
table = np.array((self.nonpara_pdf_2[:,0], cdf))
table = table.T
for j in range(self.Ndata):
if fi_u2[k] <= table[0,1]:
comp_2_Gumb[k] = min(table[:,0])
break
elif fi_u2[k] <= table[j,1]:
comp_2_Gumb[k] = (table[j,0]+table[j-1,0])/2
break
else:
comp_2_Gumb[k] = max(table[:,0])
Hs_Return = comp_1
T_Return = comp_2_Gumb
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
'''Currently not implemented in this version.'''
raise NotImplementedError
def _saveParams(self, groupObj):
groupObj.create_dataset('nonpara_dist_1', data=self.nonpara_dist_1)
groupObj.create_dataset('nonpara_dist_2', data=self.nonpara_dist_2)
class BivariateKDE(EA):
'''Create a BivariateKDE EA class for a buoy object. Contours
generated under this class will use a non-parametric KDE to fit the joint distribution.'''
def __init__(self, buoy, bw, NData = 100, logTransform = False, max_T=None, max_Hs=None):
'''
Parameters
----------
buoy : NDBCData
ESSC.Buoy Object
bw: np.array
Array containing KDE bandwidth for Hs and T
NData: int
Discretization resolution used in KDE construction
logTransform: Boolean
Logical. True if log transformation should be taken prior to
KDE construction. Default value is False.
max_T:float
            Maximum T value for KDE construction, must include possible
range of contour. Default value is 2*max(T)
max_Hs:float
            Maximum Hs value for KDE construction, must include possible
range of contour. Default value is 2*max(Hs)
'''
if logTransform:
self.method = "Bivariate KDE, Log Transform"
else:
self.method = "Bivariate KDE"
self.buoy = buoy
if max_T == None:
max_T = max(self.buoy.T)*2.
if max_Hs == None:
max_Hs = max(self.buoy.Hs)*2.
self.max_T = max_T
self.max_Hs = max_Hs
self.Hs_ReturnContours = None
self.T_ReturnContours = None
self.NData = NData
self.bw = bw
self.logTransform = logTransform
def getContours(self, time_ss, time_r):
        '''WDRT Extreme Sea State non-parametric bivariate KDE Contour
        function. This function calculates environmental contours of extreme
        sea states using a bivariate KDE to estimate the joint distribution.
        The contour is then calculated directly from the joint distribution.
Parameters
___________
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
Example
-------
To obtain the contours for a NDBC buoy::
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create Environmental Analysis object using above parameters
BivariateKDE46022 = ESSC.BivariateKDE(buoy46022, bw = [0.23,0.19], logTransform = False)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
# KDE contour generation example
Hs_Return, T_Return = BivariateKDE46022.getContours(Time_SS, Time_r)
'''
p_f = 1 / (365 * (24 / time_ss) * time_r)
if self.logTransform:
# Take log of both variables
logTp = np.log(self.buoy.T)
logHs = np.log(self.buoy.Hs)
ty = [logTp, logHs]
else:
ty = [self.buoy.T, self.buoy.Hs]
# Create grid of points
Ndata = self.NData
min_limit_1 = 0.01
max_limit_1 = self.max_T
min_limit_2 = 0.01
max_limit_2 = self.max_Hs
pts_tp = np.linspace(min_limit_1, max_limit_1, Ndata)
pts_hs = np.linspace(min_limit_2, max_limit_2, Ndata)
pt1,pt2 = np.meshgrid(pts_tp, pts_hs)
pts_tp = pt1.flatten()
pts_hs = pt2.flatten()
# Transform gridded points using log
xi = [pts_tp, pts_hs]
if self.logTransform:
txi = [np.log(pts_tp), np.log(pts_hs)]
else:
txi = xi
m = len(txi[0])
n = len(ty[0])
d = 2
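        # Product-kernel estimate: up to normalization, for each grid point x,
        #   fhat(x) = sum_i prod_j K((x_j - X_ij) / bw_j)
        # with a Gaussian kernel K; under the log transform the Jacobian
        # factor 1/x_j is applied to each kernel evaluation.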
# Create contour
f = np.zeros((1,m))
weight = np.ones((1,n))
for i in range(0,m):
ftemp = np.ones((n,1))
for j in range(0,d):
z = (txi[j][i] - ty[j])/self.bw[j]
fk = stats.norm.pdf(z)
if self.logTransform:
fnew = fk*(1/np.transpose(xi[j][i]))
else:
fnew = fk
fnew = np.reshape(fnew, (n,1))
ftemp = np.multiply(ftemp,fnew)
f[:,i] = np.dot(weight,ftemp)
        fhat = f.reshape(Ndata, Ndata)
vals = plt.contour(pt1,pt2,fhat, levels = [p_f])
plt.clf()
self.Hs_ReturnContours = []
self.T_ReturnContours = []
for i,seg in enumerate(vals.allsegs[0]):
self.Hs_ReturnContours.append(seg[:,1])
self.T_ReturnContours.append(seg[:,0])
self.Hs_ReturnContours = np.transpose(np.asarray(self.Hs_ReturnContours)[0])
self.T_ReturnContours = np.transpose(np.asarray(self.T_ReturnContours)[0])
# self.vals = vals
# contourVals = np.empty((0,2))
# for seg in vals.allsegs[0]:
# contourVals = np.append(contourVals,seg, axis = 0)
# self.Hs_ReturnContours = contourVals[:,1]
# self.T_ReturnContours = contourVals[:,0]
return self.Hs_ReturnContours, self.T_ReturnContours
class Buoy(object):
'''
This class creates a buoy object to store buoy data for use in the
environmental assessment functions available in the ESSC module.
Attributes
__________
swdList : list
List that contains numpy arrays of the spectral wave density data,
separated by year.
freqList: list
List that contains numpy arrays that contain the frequency values
for each year
dateList : list
List that contains numpy arrays of the date values for each line of
spectral data, separated by year
Hs : list
Significant wave height.
T : list
Energy period.
dateNum : list
List of datetime objects.
'''
def __init__(self, buoyNum, buoyType):
'''
Parameters
___________
buoyNum : string
device number for desired buoy
buoyType : string
type of buoy device, available options are 'NDBC' or 'CDIP'
savePath : string
relative path where the data read from ndbc.noaa.gov will be stored
'''
self.swdList = []
self.freqList = []
self.dateList = []
self.Hs = []
self.T = []
self.dateNum = []
self.buoyNum = buoyNum
self.buoyType = buoyType.upper()
def fetchFromWeb(self, savePath = "./Data/",proxy=None):
'''
Calls either __fetchCDIP() or __fetchNDBC() depending on the given
buoy's type and fetches the necessary data from its respective website.
Parameters
----------
        savePath : string
            Relative path to place directory with data files.
        proxy: dict
            Proxy server and port, i.e., {"http": "http://proxyserver:port"}
Example
_________
>>> import WDRT.ESSC as ESSC
>>> buoy = ESSC.Buoy('46022','NDBC')
>>> buoy.fetchFromWeb()
'''
if self.buoyType == "NDBC":
self.__fetchNDBC(proxy)
elif self.buoyType == "CDIP":
self.__fetchCDIP(savePath,proxy)
def __fetchNDBC(self, proxy):
'''
Searches ndbc.noaa.gov for the historical spectral wave density
data of a given device and writes the annual files from the website
to a single .txt file, and stores the values in the swdList, freqList,
and dateList member variables.
        Parameters
        ----------
        proxy: dict
            Proxy server and port, i.e., {"http": "http://proxyserver:port"}
'''
maxRecordedDateValues = 4
#preallocates data
numLines = 0
numCols = 0
numDates = 0
dateVals = []
spectralVals = []
#prepares to pull the data from the NDBC website
url = "https://www.ndbc.noaa.gov/station_history.php?station=%s" % (self.buoyNum)
if proxy == None:
ndbcURL = requests.get(url)
else:
ndbcURL = requests.get(url,proxies=proxy)
ndbcURL.raise_for_status()
ndbcHTML = bs4.BeautifulSoup(ndbcURL.text, "lxml")
headers = ndbcHTML.findAll("b", text="Spectral wave density data: ")
#checks for headers in differently formatted webpages
if len(headers) == 0:
raise Exception("Spectral wave density data for buoy #%s not found" % self.buoyNum)
if len(headers) == 2:
headers = headers[1]
else:
headers = headers[0]
links = [a["href"] for a in headers.find_next_siblings("a", href=True)]
#downloads files
for link in links:
dataLink = "https://ndbc.noaa.gov" + link
fileName = dataLink.replace('download_data', 'view_text_file')
data = urllib.request.urlopen(fileName)
print("Reading from:", data.geturl())
#First Line of every file contains the frequency data
frequency = data.readline()
if frequency.split()[4] == b'mm':
numDates = 5
else:
numDates = 4
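            # Each data row starts with date fields (YYYY MM DD hh, plus mm in
            # newer files) followed by one spectral density value per frequency;
            # rows whose spectral values read 999 (missing data) are skipped.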
frequency = np.array(frequency.split()[numDates:], dtype = np.float)
#splits and organizes data into arrays.
for line in data:
currentLine = line.split()
numCols = len(currentLine)
if numCols - numDates != len(frequency):
print("NDBC File is corrupted - Skipping and deleting data")
spectralVals = []
dateVals = []
break
if float(currentLine[numDates+1]) < 999:
numLines += 1
for j in range(maxRecordedDateValues):
dateVals.append(currentLine[j])
for j in range(numCols - numDates):
spectralVals.append(currentLine[j + numDates])
if len(spectralVals) != 0:
dateValues = np.array(dateVals, dtype=np.int)
spectralValues = np.array(spectralVals, dtype=np.float)
dateValues = np.reshape(dateValues, (numLines, maxRecordedDateValues))
spectralValues = np.reshape(spectralValues, (numLines,
(numCols - numDates)))
numLines = 0
numCols = 0
if len(spectralVals) != 0:
del dateVals[:]
del spectralVals[:]
self.swdList.append(spectralValues)
self.freqList.append(frequency)
self.dateList.append(dateValues)
self._prepData()
def loadFromTxt(self, dirPath = None):
'''Loads NDBC data previously downloaded to a series of text files in the
specified directory.
Parameters
----------
dirPath : string
Relative path to directory containing NDBC text files (created by
            NDBCdata.fetchFromWeb). If left blank, the method will search
all directories for the data using the current directory as
the root.
Example
-------
To load data from previously downloaded files
created using fetchFromWeb():
import WDRT.ESSC as ESSC
buoy46022 = ESSC.Buoy('46022','NDBC')
        buoy46022.loadFromTxt()
'''
#preallocates arrays
dateVals = []
spectralVals = []
numLines = 0
maxRecordedDateValues = 4
#finds the text files (if they exist on the machine)
if dirPath is None:
for dirpath, subdirs, files in os.walk('.'):
for dirs in subdirs:
if ("NDBC%s" % self.buoyNum) in dirs:
dirPath = os.path.join(dirpath,dirs)
break
if dirPath is None:
raise IOError("Could not find directory containing NDBC data")
fileList = glob.glob(os.path.join(dirPath,'SWD*.txt'))
if len(fileList) == 0:
raise IOError("No NDBC data files found in " + dirPath)
#reads in the files
for fileName in fileList:
print('Reading from: %s' % (fileName))
f = open(fileName, 'r')
frequency = f.readline().split()
numCols = len(frequency)
if frequency[4] == 'mm':
frequency = np.array(frequency[5:], dtype=np.float)
numTimeVals = 5
else:
frequency = np.array(frequency[4:], dtype=np.float)
numTimeVals = 4
for line in f:
currentLine = line.split()
if float(currentLine[numTimeVals + 1]) < 999:
numLines += 1
for i in range(maxRecordedDateValues):
dateVals.append(currentLine[i])
for i in range(numCols - numTimeVals):
spectralVals.append(currentLine[i + numTimeVals])
dateValues = np.array(dateVals, dtype=np.int)
spectralValues = np.array(spectralVals, dtype=np.double)
dateValues = np.reshape(dateValues, (numLines, maxRecordedDateValues))
spectralValues = np.reshape(
spectralValues, (numLines, (numCols - numTimeVals)))
del dateVals[:]
del spectralVals[:]
numLines = 0
numCols = 0
self.swdList.append(spectralValues)
self.freqList.append(frequency)
self.dateList.append(dateValues)
self._prepData()
def loadFile(self, dirPath = None):
'''Loads file depending on whether it's NDBC or CDIP.'''
if self.buoyType == "NDBC":
            self.loadFromTxt(dirPath)
if self.buoyType == "CDIP":
self.loadCDIP(dirPath)
def loadFromH5(self, fileName = None):
"""
Loads NDBC data previously saved in a .h5 file
Parameters
----------
fileName : string
Name of the .h5 file to load data from.
Example
-------
To load data from previously downloaded files:
import WDRT.ESSC as ESSC
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
buoy46022.saveData()
buoy46022.loadFromH5('NDBC46022.h5')
"""
if fileName == None:
fileName = self.buoyType + self.buoyNum + ".h5"
_, file_extension = os.path.splitext(fileName)
if not file_extension:
fileName = fileName + '.h5'
print("Reading from: ", fileName)
try:
f = h5py.File(fileName, 'r')
except IOError:
raise IOError("Could not find file: " + fileName)
self.Hs = np.array(f['buoy_Data/Hs'][:])
self.T = np.array(f['buoy_Data/Te'][:])
self.dateNum = np.array(f['buoy_Data/dateNum'][:])
self.dateList = np.array(f['buoy_Data/dateList'][:])
print("----> SUCCESS")
def saveAsH5(self, fileName=None):
'''
Saves NDBC buoy data to h5 file after fetchFromWeb() or loadFromText().
This data can later be used to create a buoy object using the
loadFromH5() function.
Parameters
----------
fileName : string
relevent path and filename where the .h5 file will be created
and saved. If no filename, the h5 file will be named
NDBC(buoyNum).h5 in location where code is running.
Example
-------
To save data to h5 file after fetchFromWeb or loadFromText:
import WDRT.ESSC as ESSC
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
buoy46022.saveAsH5()
'''
if (fileName == None):
fileName = 'NDBC' + str(self.buoyNum) + '.h5'
else:
_, file_extension = os.path.splitext(fileName)
if not file_extension:
fileName = fileName + '.h5'
f = h5py.File(fileName, 'w')
self._saveData(f)
f.close()
print("Saved buoy data");
def saveAsTxt(self, savePath = "./Data/"):
"""
Saves spectral wave density data to a .txt file in the same format as the files
found on NDBC's website.
Parameters
----------
savePath : string
Relative file path where the .txt files will be saved.
Example
-------
To save data to h5 file after fetchFromWeb or loadFromText:
import WDRT.ESSC as ESSC
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
buoy46022.saveAsTxt()
"""
curYear = self.dateList[0][0]
dateIndexDiff = 0
bFile = False #NDBC sometimes splits years into two files, the second one titled "YYYYb"
saveDir = os.path.join(savePath, 'NDBC%s' % (self.buoyNum))
print("Saving in :", saveDir)
if not os.path.exists(saveDir):
os.makedirs(saveDir)
for i in range(len(self.swdList)):
if not bFile:
swdFile = open(os.path.join(saveDir, "SWD-%s-%d.txt" %
(self.buoyNum, curYear)), 'w')
else:
swdFile = open(os.path.join(saveDir, "SWD-%s-%db.txt" %
(self.buoyNum, curYear)), 'w')
bFile = False
freqLine = "YYYY MM DD hh"
for j in range(len(self.freqList[i])):
freqLine += (" " + "%2.4f" % self.freqList[i][j])
freqLine += "\n"
swdFile.write(freqLine)
for j in range(len(self.dateList)):
if (j + dateIndexDiff + 1) > len(self.dateList):
break
newYear = self.dateList[j + dateIndexDiff][0]
if curYear != newYear:
dateIndexDiff += (j)
curYear = newYear
break
if (j+1) > len(self.swdList[i]):
dateIndexDiff += (j)
bFile = True
break
swdLine = ' '.join("%0*d" % (2,dateVal) for dateVal in self.dateList[j + dateIndexDiff]) + " "
swdLine += " ".join("%6s" % val for val in self.swdList[i][j]) + "\n"
swdFile.write(swdLine)
def createSubsetBuoy(self, trainingSize):
'''Takes a given buoy and creates a subset buoy of a given length in years.
Parameters
----------
trainingSize : int
The size in years of the subset buoy you would like to create
Returns
-------
        subsetBuoy : ESSC.Buoy object
A buoy (with Hs, T, and dateList values) that is a subset of the given buoy
Example
-------
To get a corresponding subset of a buoy with a given number of years:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create a subset of buoy 46022 consisting of the first 10 years
subsetBuoy = buoy46022.createSubsetBuoy(10)
'''
subsetBuoy = copy.deepcopy(self)
sortedIndex = sorted(range(len(self.dateNum)),key=lambda x:self.dateNum[x])
self.dateNum = self.dateNum[sortedIndex]
self.dateList = self.dateList[sortedIndex]
self.Hs = self.Hs[sortedIndex]
self.T = self.T[sortedIndex]
years = [0] * len(self.dateList)
for i in range(len(self.dateList)):
years[i] = self.dateList[i][0]
trainingYear = self.dateList[0][0] + trainingSize
        cond = np.array(years) <= trainingYear
subsetBuoy.Hs = self.Hs[cond]
subsetBuoy.T = self.T[cond]
subsetBuoy.dateList = self.dateList[cond]
return(subsetBuoy)
def _saveData(self, fileObj):
'''Organizes and saves wave height, energy period, and date data.'''
if(self.Hs is not None):
gbd = fileObj.create_group('buoy_Data')
f_Hs = gbd.create_dataset('Hs', data=self.Hs)
f_Hs.attrs['units'] = 'm'
f_Hs.attrs['description'] = 'significant wave height'
f_T = gbd.create_dataset('Te', data=self.T)
            f_T.attrs['units'] = 's'
f_T.attrs['description'] = 'energy period'
f_dateNum = gbd.create_dataset('dateNum', data=self.dateNum)
f_dateNum.attrs['description'] = 'datenum'
f_dateList = gbd.create_dataset('dateList', data=self.dateList)
f_dateList.attrs['description'] = 'date list'
else:
            raise RuntimeError('Buoy object contains no data')
def __fetchCDIP(self,savePath,proxy):
"""
Fetches the Hs and T values of a CDIP site by downloading the respective .nc file from
http://cdip.ucsd.edu/
Parameters
----------
savePath : string
Relative path to place directory with data files.
"""
url = "http://thredds.cdip.ucsd.edu/thredds/fileServer/cdip/archive/" + str(self.buoyNum) + "p1/" + \
str(self.buoyNum) +"p1_historic.nc"
print("Downloading data from: " + url)
filePath = savePath + "/" + str(self.buoyNum) + "-CDIP.nc"
urllib.request.urlretrieve (url, filePath)
self.__processCDIPData(filePath)
def loadCDIP(self, filePath = None):
"""
Loads the Hs and T values of the given site from the .nc file downloaded from
http://cdip.ucsd.edu/
Parameters
----------
filePath : string
File path to the respective .nc file containing the Hs and T values
"""
if filePath == None:
filePath = "data/" + self.buoyNum + "-CDIP.nc"
self.__processCDIPData(filePath)
def __averageValues(self):
"""
Averages the Hs and T values of the given buoy to get hour time-steps rather than
half hour time-steps
"""
self.Hs = np.mean(self.Hs.reshape(-1,2), axis = 1)
self.T = np.mean(self.T.reshape(-1,2), axis = 1)
def __processCDIPData(self,filePath):
"""
Loads the Hs and T values from the .nc file downloaded from http://cdip.ucsd.edu/
Parameters
----------
filePath : string
File path to the respective .nc file containing the Hs and T values
"""
import netCDF4
try:
data = netCDF4.Dataset(filePath)
except IOError:
raise IOError("Could not find data for CDIP site: " + self.buoyNum)
self.Hs = np.array(data["waveHs"][:], dtype = np.double)
self.T = np.array(data["waveTa"][:], dtype = np.double)
data.close()
#Some CDIP buoys record data every half hour rather than every hour
if len(self.Hs)%2 == 0:
self.__averageValues()
def _prepData(self):
'''Runs _getStats and _getDataNums for full set of data, then removes any
NaNs. This cleans and prepares the data for use. Returns wave height,
energy period, and dates.
'''
n = len(self.swdList)
Hs = []
T = []
dateNum = []
for ii in range(n):
tmp1, tmp2 = _getStats(self.swdList[ii], self.freqList[ii])
Hs.extend(tmp1)
T.extend(tmp2)
dateNum.extend(_getDateNums(self.dateList[ii]))
dateList = [date for year in self.dateList for date in year]
Hs = np.array(Hs, dtype=np.float)
        T = np.array(T, dtype=np.float)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 21:29:28 2019
@author: prasad
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn import preprocessing
def get_data(column_names):
    train_df = pd.read_csv('./data/20_percent_missing_train.txt', header=None, sep=',').values
    test_df = pd.read_csv('./data/20_percent_missing_test.txt', header=None, sep=',').values
    train_labels = train_df[:, -1]
    test_labels = test_df[:, -1]
    train_data = train_df[:, :-1]
    test_data = test_df[:, :-1]
return train_data, train_labels, test_data, test_labels
def split(data, num_of_splits = 10):
# Special Splitting
# Group 1 will consist of points {1,11,21,...}, Group 2 will consist of
# points {2,12,22,...}, ..., and Group 10 will consist of points {10,20,30,...}
folds = []
for k in range(num_of_splits):
fold = []
for i in range(k, len(data), num_of_splits):
fold.append(data[i])
fold = np.array(fold)
folds.append(fold)
return folds
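# Illustrative example: split(list(range(25)), 5) yields folds
# [0, 5, 10, 15, 20], [1, 6, 11, 16, 21], ..., i.e., every 5th point.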
# assuming features are bernoulli variables
def train_naive_bayes(train_data, train_labels, test_data, test_labels, error_table, preds, labels):
mean = np.nanmean(train_data, axis=0)
train_data = binarize(train_data, mean)
non_spam_indices = np.argwhere(train_labels == 0).flatten()
spam_indices = np.argwhere(train_labels == 1).flatten()
# get spam and non_spam data
non_spam_data = np.take(train_data, non_spam_indices, axis = 0)
spam_data = np.take(train_data, spam_indices, axis = 0)
# priors
priors = get_priors(train_labels)
count_non_spam = count(non_spam_data)
count_spam = count(spam_data)
counts = np.array([count_non_spam, count_spam])
probabilities = prob(counts)
predictions = []
test_data = binarize(test_data, mean)
for pt in range(len(test_data)):
data = test_data[pt]
pred = (probabilities * data).sum(axis=1) + priors
predictions.append(np.argmax(pred))
c_mat = conf_matrix(predictions, test_labels)
error_table.append(c_mat)
preds.append(predictions)
labels.append(test_labels)
return acc(predictions, test_labels)
def get_priors(labels):
count_spam = np.count_nonzero(labels)
count_non_spam = len(labels) - count_spam
priors = np.array([np.log(count_non_spam/ len(labels)), np.log(count_spam / len(labels))])
return priors
def acc(preds, labels):
check = (preds == labels).astype(int)
count = np.count_nonzero(check)
return count / len(labels)
def prob(feature_count):
return np.log(feature_count / feature_count.sum(axis=1)[np.newaxis].T)
def count(data):
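    # Add-one (Laplace) smoothing: a feature never seen in a class still
    # gets a nonzero count, keeping the log-probabilities in prob() finite.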
greaters = np.sum(data, axis=0) + 1.0
return greaters
def binarize(data, mean):
data = (data < mean).astype(int)
return data
def normalize(dataset, train_size):
    '''
    Args
        dataset: data to be normalized using shift-scale normalization
        train_size: number of rows belonging to the training split
    Returns
        train_data, test_data: normalized training and test splits
    '''
dataset = preprocessing.minmax_scale(dataset, feature_range=(0, 1))
train_data = dataset[:train_size]
test_data = dataset[train_size:]
return train_data, test_data
def get_labels(data):
data_labels = data[:, -1]
data = data[:, :-1]
return data, data_labels
def conf_matrix(preds, test_labels):
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(len(preds)):
p = preds[i]
if p == test_labels[i]:
if p == 1:
tp += 1
else:
tn += 1
else:
if p == 1:
fp += 1
else:
fn += 1
return np.array([tp, fp, fn, tn])
def plot_roc(truth, preds):
fprs, tprs, _ = metrics.roc_curve(truth, preds)
plt.figure(figsize = (15,10))
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot(fprs, tprs, label = 'AUC: {}'.format(metrics.auc(fprs, tprs)))
plt.legend(loc = 'lower right')
def train_folds(folds):
accs = []
error_table = []
predictions = []
labels = []
# for each fold
for k in range(len(folds)):
# kth fold selected as test set
        test_data = np.array(folds[k])
# -*- coding: utf-8 -*-
import cobra
from cobra.flux_analysis import flux_variability_analysis
from .read_spreadsheets import read_spreadsheets
from .write_spreadsheet import write_spreadsheet
import numpy as np
import re
import copy
from cobra.flux_analysis import pfba
try:
from cobra.flux_analysis import sample
except:
from cobra.sampling import sample
try:
cobra_config = cobra.Configuration()
cobra_config.solver = "glpk"
#print("glpk set as default solver")
except:
print("could not set glpk to default solver")
import cobra.util.solver as sutil
from pprint import pprint
import math
def round_sig(x, sig=2):
    if x == 0:
        value = 0
    else:
        value = round(x, sig - int(math.floor(math.log10(abs(x)))) - 1)
    return value
from cobra.flux_analysis import (
single_gene_deletion, single_reaction_deletion, double_gene_deletion,
double_reaction_deletion)
from cobra.manipulation.delete import find_gene_knockout_reactions, remove_genes
from cobra import Reaction
from cobra.flux_analysis.variability import flux_variability_analysis as cobra_flux_variability_analysis
def flux_variability_analysis(model,fraction_of_optimum=0,tolerance_feasibility=1e-6,reaction_list=None):
fva={}
if reaction_list!=None:
if isinstance(reaction_list[0], str):
reaction_list=[model.reactions.get_by_id(x) for x in reaction_list]
try:
pandas_fva=cobra_flux_variability_analysis(model,fraction_of_optimum=fraction_of_optimum,reaction_list=reaction_list)
except:
cobra.io.write_sbml_model(model,"failed_model.sbml")
raise Exception('FVA failed, error model saved as failed_model.sbml')
for reaction in pandas_fva.index:
fva[reaction]={"maximum":pandas_fva.loc[reaction]["maximum"],"minimum":pandas_fva.loc[reaction]["minimum"]}
return fva
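# Minimal usage sketch (assumes a cobra model object named `model`; the
# returned dict is keyed by reaction id, here written rxn_id):
# fva = flux_variability_analysis(model, fraction_of_optimum=0.9)
# low, high = fva[rxn_id]["minimum"], fva[rxn_id]["maximum"]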
def remove_isoforms_information(model,separator="\."):
genes_to_delete=[]
for reaction in model.reactions:
replace_dict={}
for gene in reaction.genes:
gene_match=re.match("^(.+)"+separator, gene.id)
if gene_match==None:
continue
replace_dict[gene.id]=gene_match.group(1)
print(gene.id+"->"+gene_match.group(1))
gene_reaction_rule=reaction.gene_reaction_rule
for gene_id in replace_dict:
            gene_reaction_rule=gene_reaction_rule.replace("("+gene_id,"("+replace_dict[gene_id]).replace(" "+gene_id," "+replace_dict[gene_id])
gene_reaction_rule=re.sub("^"+gene_id, replace_dict[gene_id], gene_reaction_rule, count=0, flags=0)
if len(reaction.genes)==1:
gene_reaction_rule=gene_reaction_rule.replace(gene_id,replace_dict[gene_id])
if gene_id not in genes_to_delete:
genes_to_delete.append(gene_id)
print(reaction.gene_reaction_rule)
print(gene_reaction_rule)
reaction.gene_reaction_rule=gene_reaction_rule
genes_to_remove=[]
print(genes_to_remove)
for gene in model.genes:
if len(gene.reactions)==0:
print(gene)
genes_to_remove.append(gene)
for gene in genes_to_remove:
#print gene.id
try:
model.genes.get_by_id(gene.id).remove_from_model()
            #gene.remove_from_model()
except:
print(gene.id + "could not be removed")
def sampling(model,n=100,processes=6,objective=None,starts=1,return_matrix=False,method="optgp",thinning=100):
print(method, thinning)
reaction_ids=[x.id for x in model.reactions]
if objective!=None:
print(model.reactions.get_by_id(objective).lower_bound)
flux_dict_list=[]
for i in range(0,starts):
result_matrix = sample(model, n,processes=processes,method=method,thinning=thinning).to_numpy() #Valid methods are optgp and achr. Process is only used in optgp. Thinning (“Thinning” means only recording samples every n iterations) is only used in achr
result_matrix=np.asmatrix(result_matrix)
if not return_matrix:
for row in result_matrix:
flux_dict={}
                for n_flux, flux in enumerate(np.asarray(row).flatten()):
flux_dict[reaction_ids[n_flux]]=flux
flux_dict_list.append(flux_dict)
if objective!=None:
print(flux_dict[objective])
elif return_matrix:
if i==0:
aggregated_results=result_matrix
else:
aggregated_results=np.vstack((aggregated_results,result_matrix))
if not return_matrix:
return flux_dict_list
else:
return np.transpose(aggregated_results), reaction_ids
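# Note on shapes: with return_matrix=True this returns an
# (n_reactions x n*starts) matrix plus the matching reaction_ids list;
# otherwise it returns one {reaction_id: flux} dict per sample.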
def sampling_matrix_get_mean_sd(aggregated_results,reaction_ids,include_absolute_val_stats=False,percentiles=[25,50,75]):
stat_dict={}
for n,row in enumerate(aggregated_results):
mean=np.mean(row)
        std = np.std(row)
# -*- coding: utf-8 -*-
# @Time : 2018/8/7 13:30
# @Author : <NAME>
# @File : feature_pu_model_utils.py
from utils.plain_model_utils import ModelUtils
import numpy as np
class FeaturedDetectionModelUtils(ModelUtils):
def __init__(self, dp):
super(FeaturedDetectionModelUtils, self).__init__()
self.dp = dp
def add_dict_info(self, sentences, windowSize, datasetName):
perBigDic = set()
locBigDic = set()
orgBigDic = set()
miscBigDic = set()
with open("feature_dictionary/" + datasetName + "/personBigDic.txt", "r",encoding='utf-8') as fw:
for line in fw:
line = line.strip()
if len(line) > 0:
perBigDic.add(line)
with open("feature_dictionary/" + datasetName + "/locationBigDic.txt", "r",encoding='utf-8') as fw:
for line in fw:
line = line.strip()
if len(line) > 0:
locBigDic.add(line)
with open("feature_dictionary/" + datasetName + "/organizationBigDic.txt", "r",encoding='utf-8') as fw:
for line in fw:
line = line.strip()
if len(line) > 0:
orgBigDic.add(line)
if self.dp.dataset != "muc" and self.dp.dataset != "twitter":
with open("feature_dictionary/" + datasetName + "/miscBigDic.txt", "r",encoding='utf-8') as fw:
for line in fw:
line = line.strip()
if len(line) > 0:
miscBigDic.add(line)
for i, sentence in enumerate(sentences):
for j, data in enumerate(sentence):
feature = np.zeros([4, windowSize], dtype=int)
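                # feature is a 4 x windowSize indicator table: one row per
                # entity tag, and column n-1 flags whether an n-word phrase
                # around this token appears in that tag's dictionary; it is
                # flattened below to a length 4*windowSize vector.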
maxLen = len(sentence)
remainLenRight = maxLen - j - 1
rightSize = min(remainLenRight, windowSize - 1)
remainLenLeft = j
leftSize = min(remainLenLeft, windowSize - 1)
k = 0
words = []
words.append(sentence[j][0])
while k < rightSize:
# right side
word = sentence[j + k + 1][0]
temp = words[-1]
word = temp + " " + word
words.append(word)
k += 1
k = 0
while k < leftSize:
# left side
word = sentence[j - k - 1][0]
temp = words[0]
word = word + " " + temp
words.insert(0, word)
k += 1
for idx, word in enumerate(words):
count = len(word.split())
if word in perBigDic:
feature[self.dp.tag2Idx["PER"]][count - 1] = 1
elif word in locBigDic:
feature[self.dp.tag2Idx["LOC"]][count - 1] = 1
elif word in orgBigDic:
feature[self.dp.tag2Idx["ORG"]][count - 1] = 1
feature = feature.reshape([-1]).tolist()
sentences[i][j] = [data[0], data[1], feature, data[2], data[3]]
def createMatrices(self, sentences, word2Idx, case2Idx, char2Idx):
unknownIdx = word2Idx['UNKNOWN_TOKEN']
paddingIdx = word2Idx['PADDING_TOKEN']
dataset = []
wordCount = 0
unknownWordCount = 0
for sentence in sentences:
wordIndices = []
caseIndices = []
charIndices = []
featureList = []
entityFlags = []
labeledFlags = []
for word, char, feature, ef, lf in sentence:
wordCount += 1
if word in word2Idx:
wordIdx = word2Idx[word]
elif word.lower() in word2Idx:
wordIdx = word2Idx[word.lower()]
else:
wordIdx = unknownIdx
unknownWordCount += 1
charIdx = []
for x in char:
if x in char2Idx:
charIdx.append(char2Idx[x])
else:
charIdx.append(char2Idx["UNKNOWN"])
wordIndices.append(wordIdx)
caseIndices.append(self.get_casing(word, case2Idx))
charIndices.append(charIdx)
featureList.append(feature)
entityFlags.append(ef)
labeledFlags.append(lf)
dataset.append(
[wordIndices, caseIndices, charIndices, featureList, entityFlags, labeledFlags])
return dataset
def padding(self, sentences):
maxlen = 52
for i, sentence in enumerate(sentences):
mask = np.zeros([len(sentences[i][2]), maxlen])
for j, chars in enumerate(sentences[i][2]):
for k, c in enumerate(chars):
if k < maxlen:
mask[j][k] = c
sentences[i][2] = mask.tolist()
sentences_X = []
sentences_Y = []
sentences_LF = []
for i, sentence in enumerate(sentences):
sentences_X.append(sentence[:4])
sentences_Y.append(sentence[4])
sentences_LF.append(sentence[5])
        return np.array(sentences_X), np.array(sentences_Y), np.array(sentences_LF)
# Original filename: dewarp.py
#
# Author: <NAME>
# Email: <EMAIL>
# Date: March 2011
#
# Summary: Dewarp, recenter, and rotate an image.
#
import numpy as np
import scipy
import scipy.ndimage
import scipy.interpolate
import pyfits
import math
def distortion_map(dimen=2048, mjd=55000):
"""
    Function distortion_map takes two optional arguments:
    1. dimen: integer dimension of the (square) output array, default 2048
    2. mjd: the MJD of observations, used to decide which set of
       distortion coefficients to use
    It builds the distortion map used by the companion dewarp routine.
    dewarp applies the distortion correction and recenters the
    resulting image in a 2897x2897 array. The coordinates of the
    central point are defined in the input 2048x2048 array. The
    function dewarp returns an HDU with the dewarped, rotated, and
    recentered array.
"""
dimy = dimen
dimx = dimen
#################################################################
# Coefficients determined from the MCMC
#################################################################
# This is the map prior to May 2011. MJD 55682=May 1st, 2011
if mjd < 55680:
a = [-5.914548e-01, -3.835090e-03, -4.091949e-05,
1.056099e+02, -2.330969e-02, -2.250246e-03,
-1.624182e-02, 2.437204e-04, -3.810423e-03]
b = [1.022675e+02, -1.490726e-02, -2.589800e-03,
5.355218e-01, 2.314851e-03, 5.392667e-04,
2.279097e-02, -8.767285e-04, 1.290849e-03]
# Plate scale differs by 8% with the optical secondary, used
# in September 2012 (September 10 = MJD 56180)
    elif np.abs(mjd - 56180)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Import
# =============================================================================
from collections import defaultdict, OrderedDict
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import scipy.spatial as spatial
from os.path import exists
import numpy as np
import argparse
import sys
import os
# =============================================================================
# Argparse some parameters
# =============================================================================
parser = argparse.ArgumentParser()
parser.add_argument("-seed", "--seed", default=0, type=int)
parser.add_argument("-type", "--type", default='default', type=str)
parser.add_argument("-distanciation", "--distanciation", default=16, type=float)
parser.add_argument("-n_population", "--n_population", default=1, type=int)
parser.add_argument("-transfer_rate", "--transfer_rate", default=0.0, type=float)
parser.add_argument("-transfer_proportion", "--transfer_proportion", default=0.01, type=float)
parser.add_argument("-curfew", "--curfew", default=24, type=int)
parser.add_argument("-confined", "--confined", default=0.0, type=float)
args = parser.parse_args() #"--seed 20 --distanciation 0.0".split(' ')
seed = args.seed
#define simulation name
simulation_name = f'fix4_{args.type}_{seed}_{args.distanciation}_{args.transfer_rate}_{args.curfew}_{args.confined}'
if exists(f'simulation/{simulation_name}/logs.pydict'):
sys.exit()
print('Simulation name:', simulation_name)
# =============================================================================
# Simulation parameters - Starting hypotheses
# -----------------------------------------------------------------------------
# n_ is an absolute number
# p_ is a probability/proportion with 1 = 100%
# =============================================================================
#primary parameters
n_hours = 2 * 30 * 24 #2 months
n_population = args.n_population #number of densed population / cities
n_persons_per_population = 1000
p_contaminated_per_population = 0.01 #init proportion of contaminated people
distance_to_pcontamination = OrderedDict({0.1:0.95, 0.5:0.9, 1:0.7, 2:0.6, 5:0.3}) #probability is applied each hour
starting_distanciation = args.distanciation #meters | "density"
default_movement = 2 #meters per hour
contamination_duration = 14 * 24 #hours
delay_before_infectious = 3 * 24 #hours
death_possibility_delay = 9*24 #hours
p_lethality = 0.006
p_lethality_per_hour = 0.006/(contamination_duration-death_possibility_delay)
wake_up_time = 8
sleep_up_time = 24
curfew = False if args.curfew == 24 else args.curfew
sleep_up_time = sleep_up_time if not curfew else curfew
moving_time = list(range(wake_up_time, sleep_up_time))
p_confined = args.confined #proportion of people not moving each day
probability_population_transfer = args.transfer_rate
proportion_population_transfer = args.transfer_proportion
#secondary parameters
move_before_start = 50
init_delta_xy = 3 #+/- n meters initializing the population
mean_hours_since_contamination = 3*24;
std_hours_since_contamination = 2*24
#non-parameters
np.random.seed(seed)
VULNERABLE = 0
IMMUNIZED = -1
DEAD = -2
plt.ioff()
colors = ['black', 'limegreen', 'dodgerblue', 'tomato']
simulation_dir = f'simulation/{simulation_name}'
#check
assert death_possibility_delay < contamination_duration
assert sleep_up_time > wake_up_time
assert VULNERABLE > IMMUNIZED > DEAD
assert not (p_confined > 0 and probability_population_transfer > 0), 'currently not compatible'
if not exists(simulation_dir):
os.mkdir(simulation_dir)
# =============================================================================
# Generate populations
# -----------------------------------------------------------------------------
# generate populations in a grid pattern, each person is separated by starting_distanciation
# =============================================================================
populations_yx = []
for i_pop in range(0, n_population):
border = int(np.sqrt(n_persons_per_population))+1
xpos = [int(i/border) for i in list(range(0,n_persons_per_population))]
ypos = list(range(0, border)) * border
ypos = ypos[0:n_persons_per_population]
population = np.array((ypos, xpos), dtype=np.float)
population *= starting_distanciation
for i in range(0,move_before_start):
        population += np.random.uniform(-init_delta_xy, init_delta_xy, population.shape)
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for input_pipeline."""
import functools
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow_datasets as tfds
from sparse_mixers import input_pipeline
import sentencepiece as spm
class MockTokenizer(spm.SentencePieceProcessor):
"""Mock tokenizer returning pre-specified tokens."""
def EncodeAsIds(self, text):
del text # Ignore input and return dummy output
return np.array([6, 7, 8])
def pad_id(self):
return 1
def eos_id(self):
return 2
def bos_id(self):
return 3
def PieceToId(self, text):
del text # Ignore input and return dummy output
return np.random.randint(5, 20)
def GetPieceSize(self):
return 20
class InputPipelineTest(parameterized.TestCase):
def test_clean_multirc_inputs(self):
self.assertEqual(
input_pipeline._clean_multirc_inputs(
dataset_name="super_glue/multirc", text=b"<br>html</b>"), " html ")
self.assertEqual(
input_pipeline._clean_multirc_inputs(
dataset_name="super_glue/multirc", text=b"clean"), "clean")
self.assertEqual(
input_pipeline._clean_multirc_inputs(
dataset_name="not_multirc", text="<br>html</b>"), "<br>html</b>")
@parameterized.parameters(
"glue/cola", "glue/sst2", "glue/mrpc", "glue/qqp", "glue/stsb",
"glue/mnli", "glue/qnli", "glue/rte", "glue/wnli", "super_glue/boolq",
"super_glue/cb", "super_glue/copa", "super_glue/multirc",
"super_glue/record", "super_glue/rte", "super_glue/wic", "super_glue/wsc",
"super_glue/wsc.fixed", "super_glue/axb", "super_glue/axg")
def test_classification_inputs(self, dataset_name):
batch_size = 2
max_seq_length = 4
data_pipeline = functools.partial(
input_pipeline.classification_inputs,
split=tfds.Split.TRAIN,
batch_size=batch_size,
tokenizer=MockTokenizer(),
max_seq_length=max_seq_length)
with tfds.testing.mock_data(num_examples=10):
for batch, _ in zip(data_pipeline(dataset_name=dataset_name), range(1)):
self.assertSetEqual(
set(batch.keys()), {"input_ids", "type_ids", "idx", "label"})
self.assertTupleEqual(batch["input_ids"].shape,
(batch_size, max_seq_length))
self.assertTupleEqual(batch["type_ids"].shape,
(batch_size, max_seq_length))
self.assertTupleEqual(batch["idx"].shape, (batch_size,))
self.assertEqual(batch["label"].shape[0], batch_size)
def test_singularize_copa_examples(self):
dataset = [{
"idx":
np.array([0]),
"premise":
np.array(["I packed up my belongings."], dtype="object"),
"question":
np.array(["cause"], dtype="object"),
"choice1":
| np.array(["I was hunting for a new apartment."], dtype="object") | numpy.array |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import scipy.sparse as sp
from typing import List, Dict
from GridCal.Engine.basic_structures import Logger
import GridCal.Engine.Core.topology as tp
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.basic_structures import BranchImpedanceMode
from GridCal.Engine.basic_structures import BusMode
from GridCal.Engine.Simulations.PowerFlow.jacobian_based_power_flow import Jacobian
from GridCal.Engine.Core.common_functions import compile_types, find_different_states
class OpfTimeCircuit:
def __init__(self, nbus, nline, ntr, nvsc, nhvdc, nload, ngen, nbatt, nshunt, nstagen, ntime, sbase, time_array,
apply_temperature=False, branch_tolerance_mode: BranchImpedanceMode = BranchImpedanceMode.Specified):
"""
:param nbus: number of buses
:param nline: number of lines
:param ntr: number of transformers
:param nvsc:
:param nhvdc:
:param nload:
:param ngen:
:param nbatt:
:param nshunt:
"""
self.nbus = nbus
self.nline = nline
self.ntr = ntr
self.nvsc = nvsc
self.nhvdc = nhvdc
self.nload = nload
self.ngen = ngen
self.nbatt = nbatt
self.nshunt = nshunt
self.nstagen = nstagen
self.ntime = ntime
self.Sbase = sbase
self.apply_temperature = apply_temperature
self.branch_tolerance_mode = branch_tolerance_mode
self.time_array = time_array
# bus ----------------------------------------------------------------------------------------------------------
self.bus_names = np.empty(nbus, dtype=object)
self.bus_types = np.empty(nbus, dtype=int)
self.bus_installed_power = np.zeros(nbus, dtype=float)
self.bus_active = np.ones((ntime, nbus), dtype=int)
self.Vbus = np.ones((ntime, nbus), dtype=complex)
# branch common ------------------------------------------------------------------------------------------------
self.nbr = nline + ntr + nvsc # exclude the HVDC model since it is not a real branch
self.branch_names = np.empty(self.nbr, dtype=object)
self.branch_active = np.zeros((ntime, self.nbr), dtype=int)
self.F = np.zeros(self.nbr, dtype=int) # indices of the "from" buses
self.T = np.zeros(self.nbr, dtype=int) # indices of the "to" buses
self.branch_rates = np.zeros((ntime, self.nbr), dtype=float)
self.branch_cost = np.zeros((ntime, self.nbr), dtype=float)
self.branch_R = np.zeros(self.nbr, dtype=float)
self.branch_X = np.zeros(self.nbr, dtype=float)
self.C_branch_bus_f = sp.lil_matrix((self.nbr, nbus), dtype=int) # connectivity branch with their "from" bus
self.C_branch_bus_t = sp.lil_matrix((self.nbr, nbus), dtype=int) # connectivity branch with their "to" bus
# lines --------------------------------------------------------------------------------------------------------
self.line_names = np.zeros(nline, dtype=object)
self.line_R = np.zeros(nline, dtype=float)
self.line_X = np.zeros(nline, dtype=float)
self.line_B = np.zeros(nline, dtype=float)
self.line_temp_base = np.zeros(nline, dtype=float)
self.line_temp_oper = np.zeros(nline, dtype=float)
self.line_alpha = np.zeros(nline, dtype=float)
self.line_impedance_tolerance = np.zeros(nline, dtype=float)
self.C_line_bus = sp.lil_matrix((nline, nbus), dtype=int) # this ons is just for splitting islands
# transformer 2W + 3W ------------------------------------------------------------------------------------------
self.tr_names = np.zeros(ntr, dtype=object)
self.tr_R = np.zeros(ntr, dtype=float)
self.tr_X = np.zeros(ntr, dtype=float)
self.tr_G = np.zeros(ntr, dtype=float)
self.tr_B = np.zeros(ntr)
self.tr_tap_f = np.ones(ntr) # tap generated by the difference in nominal voltage at the form side
self.tr_tap_t = np.ones(ntr) # tap generated by the difference in nominal voltage at the to side
self.tr_tap_mod = np.ones(ntr) # normal tap module
self.tr_tap_ang = np.zeros(ntr) # normal tap angle
self.C_tr_bus = sp.lil_matrix((ntr, nbus), dtype=int) # this ons is just for splitting islands
# hvdc line ----------------------------------------------------------------------------------------------------
self.hvdc_names = np.zeros(nhvdc, dtype=object)
self.hvdc_active = np.zeros((ntime, nhvdc), dtype=bool)
self.hvdc_rate = np.zeros((ntime, nhvdc), dtype=float)
self.hvdc_Pf = np.zeros((ntime, nhvdc))
self.hvdc_Pt = np.zeros((ntime, nhvdc))
self.C_hvdc_bus_f = sp.lil_matrix((nhvdc, nbus), dtype=int) # this ons is just for splitting islands
self.C_hvdc_bus_t = sp.lil_matrix((nhvdc, nbus), dtype=int) # this ons is just for splitting islands
# vsc converter ------------------------------------------------------------------------------------------------
self.vsc_names = np.zeros(nvsc, dtype=object)
self.vsc_R1 = np.zeros(nvsc)
self.vsc_X1 = np.zeros(nvsc)
self.vsc_Gsw = np.zeros(nvsc)
self.vsc_Beq = np.zeros(nvsc)
self.vsc_m = np.zeros(nvsc)
self.vsc_theta = np.zeros(nvsc)
self.C_vsc_bus = sp.lil_matrix((nvsc, nbus), dtype=int) # this ons is just for splitting islands
# load ---------------------------------------------------------------------------------------------------------
self.load_names = np.empty(nload, dtype=object)
self.load_active = np.zeros((ntime, nload), dtype=bool)
self.load_s = np.zeros((ntime, nload), dtype=complex)
self.load_cost = np.zeros((ntime, nload))
self.C_bus_load = sp.lil_matrix((nbus, nload), dtype=int)
# static generators --------------------------------------------------------------------------------------------
self.static_generator_names = np.empty(nstagen, dtype=object)
self.static_generator_active = np.zeros((ntime, nstagen), dtype=bool)
self.static_generator_s = np.zeros((ntime, nstagen), dtype=complex)
self.C_bus_static_generator = sp.lil_matrix((nbus, nstagen), dtype=int)
# battery ------------------------------------------------------------------------------------------------------
self.battery_names = np.empty(nbatt, dtype=object)
self.battery_controllable = np.zeros(nbatt, dtype=bool)
self.battery_dispatchable = np.zeros(nbatt, dtype=bool)
self.battery_pmin = np.zeros(nbatt)
self.battery_pmax = np.zeros(nbatt)
self.battery_enom = np.zeros(nbatt)
self.battery_min_soc = np.zeros(nbatt)
self.battery_max_soc = np.zeros(nbatt)
self.battery_soc_0 = np.zeros(nbatt)
self.battery_charge_efficiency = np.zeros(nbatt)
self.battery_discharge_efficiency = np.zeros(nbatt)
self.battery_installed_p = np.zeros(nbatt)
self.battery_active = np.zeros((ntime, nbatt), dtype=bool)
self.battery_p = np.zeros((ntime, nbatt))
self.battery_pf = np.zeros((ntime, nbatt))
self.battery_v = np.zeros((ntime, nbatt))
self.battery_cost = np.zeros((ntime, nbatt))
self.C_bus_batt = sp.lil_matrix((nbus, nbatt), dtype=int)
# generator ----------------------------------------------------------------------------------------------------
self.generator_names = np.empty(ngen, dtype=object)
self.generator_controllable = np.zeros(ngen, dtype=bool)
self.generator_dispatchable = np.zeros(ngen, dtype=bool)
self.generator_installed_p = np.zeros(ngen)
self.generator_pmin = np.zeros(ngen)
self.generator_pmax = np.zeros(ngen)
self.generator_active = np.zeros((ntime, ngen), dtype=bool)
self.generator_p = np.zeros((ntime, ngen))
self.generator_pf = np.zeros((ntime, ngen))
self.generator_v = np.zeros((ntime, ngen))
self.generator_cost = np.zeros((ntime, ngen))
self.C_bus_gen = sp.lil_matrix((nbus, ngen), dtype=int)
# shunt --------------------------------------------------------------------------------------------------------
self.shunt_names = np.empty(nshunt, dtype=object)
self.shunt_active = np.zeros((ntime, nshunt), dtype=bool)
self.shunt_admittance = np.zeros((ntime, nshunt), dtype=complex)
self.C_bus_shunt = sp.lil_matrix((nbus, nshunt), dtype=int)
# --------------------------------------------------------------------------------------------------------------
# Arrays for the simulation
# --------------------------------------------------------------------------------------------------------------
self.Sbus = np.zeros((self.nbus, ntime), dtype=complex)
self.Ibus = np.zeros((self.nbus, ntime), dtype=complex)
self.Yshunt_from_devices = np.zeros((self.nbus, ntime), dtype=complex)
self.Qmax_bus = np.zeros((self.nbus, ntime))
self.Qmin_bus = np.zeros((self.nbus, ntime))
# only one Y matrix per time island, that is the guarantee we get by splitting the TimeCircuit in TimeIslands
self.Ybus = None
self.Yf = None
self.Yt = None
self.Yseries = None
self.Yshunt = None
# self.Ysh_helm = None
self.B1 = None
self.B2 = None
self.Bpqpv = None
self.Bref = None
self.original_time_idx = np.arange(self.ntime)
self.original_bus_idx = np.arange(self.nbus)
self.original_branch_idx = np.arange(self.nbr)
self.original_tr_idx = np.arange(self.ntr)
self.original_gen_idx = np.arange(self.ngen)
self.original_bat_idx = np.arange(self.nbatt)
self.pq = list()
self.pv = list()
self.vd = list()
self.pqpv = list()
self.available_structures = ['Vbus', 'Sbus', 'Ibus', 'Ybus', 'Yshunt', 'Yseries',
"B'", "B''", 'Types', 'Jacobian', 'Qmin', 'Qmax']
def consolidate(self):
"""
Consolidates the information of this object
:return:
"""
self.C_branch_bus_f = self.C_branch_bus_f.tocsc()
self.C_branch_bus_t = self.C_branch_bus_t.tocsc()
self.C_line_bus = self.C_line_bus.tocsc()
self.C_tr_bus = self.C_tr_bus.tocsc()
self.C_hvdc_bus_f = self.C_hvdc_bus_f.tocsc()
self.C_hvdc_bus_t = self.C_hvdc_bus_t.tocsc()
self.C_vsc_bus = self.C_vsc_bus.tocsc()
self.C_bus_load = self.C_bus_load.tocsr()
self.C_bus_batt = self.C_bus_batt.tocsr()
self.C_bus_gen = self.C_bus_gen.tocsr()
self.C_bus_shunt = self.C_bus_shunt.tocsr()
self.C_bus_static_generator = self.C_bus_static_generator.tocsr()
self.bus_installed_power = self.C_bus_gen * self.generator_installed_p
self.bus_installed_power += self.C_bus_batt * self.battery_installed_p
def get_power_injections(self):
"""
Compute the power
:return: Array of power injections
"""
# load
Sbus = - self.C_bus_load * (self.load_s * self.load_active).T # MW
# static generators
Sbus += self.C_bus_static_generator * (self.static_generator_s * self.static_generator_active).T # MW
# generators
Sbus += self.C_bus_gen * (self.generator_p * self.generator_active).T
# battery
Sbus += self.C_bus_batt * (self.battery_p * self.battery_active).T
# HVDC forced power
if self.nhvdc:
Sbus += ((self.hvdc_active * self.hvdc_Pf) * self.C_hvdc_bus_f).T
Sbus += ((self.hvdc_active * self.hvdc_Pt) * self.C_hvdc_bus_t).T
Sbus /= self.Sbase
return Sbus
def R_corrected(self):
"""
Returns temperature corrected resistances (numpy array) based on a formula
provided by: NFPA 70-2005, National Electrical Code, Table 8, footnote #2; and
https://en.wikipedia.org/wiki/Electrical_resistivity_and_conductivity#Linear_approximation
(version of 2019-01-03 at 15:20 EST).
"""
return self.line_R * (1.0 + self.line_alpha * (self.line_temp_oper - self.line_temp_base))
def compute_admittance_matrices(self):
"""
Compute the admittance matrices
:return: Ybus, Yseries, Yshunt
"""
t = self.original_time_idx[0]
# form the connectivity matrices with the states applied -------------------------------------------------------
br_states_diag = sp.diags(self.branch_active[t, :])
Cf = br_states_diag * self.C_branch_bus_f
Ct = br_states_diag * self.C_branch_bus_t
# Declare the empty primitives ---------------------------------------------------------------------------------
# The composition order is and will be: Pi model, HVDC, VSC
Ytt = np.empty(self.nbr, dtype=complex)
Yff = np.empty(self.nbr, dtype=complex)
Yft = np.empty(self.nbr, dtype=complex)
Ytf = np.empty(self.nbr, dtype=complex)
# Branch primitives in vector form, for Yseries
Ytts = np.empty(self.nbr, dtype=complex)
Yffs = np.empty(self.nbr, dtype=complex)
Yfts = np.empty(self.nbr, dtype=complex)
Ytfs = np.empty(self.nbr, dtype=complex)
ysh_br = np.empty(self.nbr, dtype=complex)
# line ---------------------------------------------------------------------------------------------------------
a = 0
b = self.nline
# use the specified of the temperature-corrected resistance
if self.apply_temperature:
line_R = self.R_corrected()
else:
line_R = self.line_R
# modify the branches impedance with the lower, upper tolerance values
if self.branch_tolerance_mode == BranchImpedanceMode.Lower:
line_R *= (1 - self.line_impedance_tolerance / 100.0)
elif self.branch_tolerance_mode == BranchImpedanceMode.Upper:
line_R *= (1 + self.line_impedance_tolerance / 100.0)
Ys_line = 1.0 / (line_R + 1.0j * self.line_X)
Ysh_line = 1.0j * self.line_B
Ys_line2 = Ys_line + Ysh_line / 2.0
# branch primitives in vector form for Ybus
Ytt[a:b] = Ys_line2
Yff[a:b] = Ys_line2
Yft[a:b] = - Ys_line
Ytf[a:b] = - Ys_line
# branch primitives in vector form, for Yseries
Ytts[a:b] = Ys_line
Yffs[a:b] = Ys_line
Yfts[a:b] = - Ys_line
Ytfs[a:b] = - Ys_line
ysh_br[a:b] = Ysh_line / 2.0
# transformer models -------------------------------------------------------------------------------------------
a = self.nline
b = a + self.ntr
Ys_tr = 1.0 / (self.tr_R + 1.0j * self.tr_X)
Ysh_tr = 1.0j * self.tr_B
Ys_tr2 = Ys_tr + Ysh_tr / 2.0
tap = self.tr_tap_mod * np.exp(1.0j * self.tr_tap_ang)
# branch primitives in vector form for Ybus
Ytt[a:b] = Ys_tr2 / (self.tr_tap_t * self.tr_tap_t)
Yff[a:b] = Ys_tr2 / (self.tr_tap_f * self.tr_tap_f * tap * np.conj(tap))
Yft[a:b] = - Ys_tr / (self.tr_tap_f * self.tr_tap_t * np.conj(tap))
Ytf[a:b] = - Ys_tr / (self.tr_tap_t * self.tr_tap_f * tap)
# branch primitives in vector form, for Yseries
Ytts[a:b] = Ys_tr
Yffs[a:b] = Ys_tr / (tap * np.conj(tap))
Yfts[a:b] = - Ys_tr / np.conj(tap)
Ytfs[a:b] = - Ys_tr / tap
ysh_br[a:b] = Ysh_tr / 2.0
# VSC MODEL ----------------------------------------------------------------------------------------------------
a = self.nline + self.ntr
b = a + self.nvsc
Y_vsc = 1.0 / (self.vsc_R1 + 1.0j * self.vsc_X1) # Y1
Yff[a:b] = Y_vsc
Yft[a:b] = -self.vsc_m * np.exp(1.0j * self.vsc_theta) * Y_vsc
Ytf[a:b] = -self.vsc_m * np.exp(-1.0j * self.vsc_theta) * Y_vsc
Ytt[a:b] = self.vsc_Gsw + self.vsc_m * self.vsc_m * (Y_vsc + 1.0j * self.vsc_Beq)
Yffs[a:b] = Y_vsc
Yfts[a:b] = -self.vsc_m * np.exp(1.0j * self.vsc_theta) * Y_vsc
Ytfs[a:b] = -self.vsc_m * np.exp(-1.0j * self.vsc_theta) * Y_vsc
Ytts[a:b] = self.vsc_m * self.vsc_m * (Y_vsc + 1.0j)
# HVDC LINE MODEL ----------------------------------------------------------------------------------------------
# does not apply since the HVDC-line model is the simplistic 2-generator model
# SHUNT --------------------------------------------------------------------------------------------------------
self.Yshunt_from_devices = self.C_bus_shunt * (self.shunt_admittance * self.shunt_active / self.Sbase).T
# form the admittance matrices ---------------------------------------------------------------------------------
self.Yf = sp.diags(Yff) * Cf + sp.diags(Yft) * Ct
self.Yt = sp.diags(Ytf) * Cf + sp.diags(Ytt) * Ct
self.Ybus = sp.csc_matrix(Cf.T * self.Yf + Ct.T * self.Yt)
# form the admittance matrices of the series and shunt elements ------------------------------------------------
Yfs = sp.diags(Yffs) * Cf + sp.diags(Yfts) * Ct
Yts = sp.diags(Ytfs) * Cf + sp.diags(Ytts) * Ct
self.Yseries = sp.csc_matrix(Cf.T * Yfs + Ct.T * Yts)
self.Yshunt = Cf.T * ysh_br + Ct.T * ysh_br
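        # Note on the assembly above (illustrative): this is the standard sparse
        # construction Ybus = Cf.T @ Yf + Ct.T @ Yt, where Yf/Yt map bus voltages
        # to the "from"/"to" branch current injections.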
def get_generator_injections(self):
"""
Compute the active and reactive power of non-controlled generators (assuming all)
:return:
"""
pf2 = np.power(self.generator_pf, 2.0)
pf_sign = (self.generator_pf + 1e-20) / np.abs(self.generator_pf + 1e-20)
Q = pf_sign * self.generator_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))
return self.generator_p + 1.0j * Q
def get_battery_injections(self):
"""
Compute the active and reactive power of non-controlled batteries (assuming all)
:return:
"""
pf2 = np.power(self.battery_pf, 2.0)
pf_sign = (self.battery_pf + 1e-20) / np.abs(self.battery_pf + 1e-20)
Q = pf_sign * self.battery_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))
return self.battery_p + 1.0j * Q
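    # Worked example for the pf-to-Q conversion above (illustrative): it is
    # Q = sign(pf) * P * sqrt((1 - pf^2) / pf^2) = P * tan(acos(pf)).
    # For P = 100 MW at pf = 0.9: Q = 100 * sqrt(0.19 / 0.81) ~ 48.4 MVAr.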
def compute_injections(self):
"""
Compute the power
:return: nothing, the results are stored in the class
"""
# load
self.Sbus = - self.C_bus_load * (self.load_s * self.load_active).T # MW
# static generators
self.Sbus += self.C_bus_static_generator * (self.static_generator_s * self.static_generator_active).T # MW
# generators
self.Sbus += self.C_bus_gen * (self.get_generator_injections() * self.generator_active).T
# battery
self.Sbus += self.C_bus_batt * (self.get_battery_injections() * self.battery_active).T
# HVDC forced power
if self.nhvdc:
self.Sbus += ((self.hvdc_active * self.hvdc_Pf) * self.C_hvdc_bus_f).T
self.Sbus += ((self.hvdc_active * self.hvdc_Pt) * self.C_hvdc_bus_t).T
self.Sbus /= self.Sbase
def consolidate(self):
"""
Computes the parameters given the filled-in information
:return:
"""
self.compute_injections()
self.vd, self.pq, self.pv, self.pqpv = compile_types(Sbus=self.Sbus[:, 0], types=self.bus_types)
self.compute_admittance_matrices()
def get_structure(self, structure_type) -> pd.DataFrame:
"""
Get a DataFrame with the input.
Arguments:
            **structure_type** (str): 'Vbus', 'Sbus', 'Ibus', 'Ybus', 'Yshunt', 'Yseries',
                                      "B'", "B''", 'Types', 'Jacobian', 'Qmin' or 'Qmax'
Returns:
pandas DataFrame
"""
if structure_type == 'Vbus':
df = pd.DataFrame(data=self.Vbus, columns=['Voltage (p.u.)'], index=self.bus_names)
elif structure_type == 'Sbus':
df = pd.DataFrame(data=self.Sbus, columns=['Power (p.u.)'], index=self.bus_names)
elif structure_type == 'Ibus':
df = pd.DataFrame(data=self.Ibus, columns=['Current (p.u.)'], index=self.bus_names)
elif structure_type == 'Ybus':
df = pd.DataFrame(data=self.Ybus.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == 'Yshunt':
df = pd.DataFrame(data=self.Yshunt, columns=['Shunt admittance (p.u.)'], index=self.bus_names)
elif structure_type == 'Yseries':
df = pd.DataFrame(data=self.Yseries.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == "B'":
df = pd.DataFrame(data=self.B1.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == "B''":
df = pd.DataFrame(data=self.B2.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == 'Types':
df = pd.DataFrame(data=self.bus_types, columns=['Bus types'], index=self.bus_names)
elif structure_type == 'Qmin':
df = pd.DataFrame(data=self.Qmin_bus, columns=['Qmin'], index=self.bus_names)
elif structure_type == 'Qmax':
df = pd.DataFrame(data=self.Qmax_bus, columns=['Qmax'], index=self.bus_names)
elif structure_type == 'Jacobian':
J = Jacobian(self.Ybus, self.Vbus, self.Ibus, self.pq, self.pqpv)
"""
J11 = dS_dVa[array([pvpq]).T, pvpq].real
J12 = dS_dVm[array([pvpq]).T, pq].real
J21 = dS_dVa[array([pq]).T, pvpq].imag
J22 = dS_dVm[array([pq]).T, pq].imag
"""
npq = len(self.pq)
npv = len(self.pv)
npqpv = npq + npv
cols = ['dS/dVa'] * npqpv + ['dS/dVm'] * npq
rows = cols
df = pd.DataFrame(data=J.toarray(), columns=cols, index=rows)
else:
raise Exception('PF input: structure type not found')
return df
def get_opf_time_island(self, bus_idx, time_idx) -> "OpfTimeCircuit":
"""
Get the island corresponding to the given buses
:param bus_idx: array of bus indices
:param time_idx: array of time indices
        :return: OpfTimeCircuit restricted to the given buses and times
"""
# find the indices of the devices of the island
line_idx = tp.get_elements_of_the_island(self.C_line_bus, bus_idx)
tr_idx = tp.get_elements_of_the_island(self.C_tr_bus, bus_idx)
vsc_idx = tp.get_elements_of_the_island(self.C_vsc_bus, bus_idx)
hvdc_idx = tp.get_elements_of_the_island(self.C_hvdc_bus_f + self.C_hvdc_bus_t, bus_idx)
br_idx = tp.get_elements_of_the_island(self.C_branch_bus_f + self.C_branch_bus_t, bus_idx)
load_idx = tp.get_elements_of_the_island(self.C_bus_load.T, bus_idx)
stagen_idx = tp.get_elements_of_the_island(self.C_bus_static_generator.T, bus_idx)
gen_idx = tp.get_elements_of_the_island(self.C_bus_gen.T, bus_idx)
batt_idx = tp.get_elements_of_the_island(self.C_bus_batt.T, bus_idx)
shunt_idx = tp.get_elements_of_the_island(self.C_bus_shunt.T, bus_idx)
nc = OpfTimeCircuit(nbus=len(bus_idx),
nline=len(line_idx),
ntr=len(tr_idx),
nvsc=len(vsc_idx),
nhvdc=len(hvdc_idx),
nload=len(load_idx),
ngen=len(gen_idx),
nbatt=len(batt_idx),
nshunt=len(shunt_idx),
nstagen=len(stagen_idx),
ntime=len(time_idx),
sbase=self.Sbase,
time_array=self.time_array[time_idx],
apply_temperature=self.apply_temperature,
branch_tolerance_mode=self.branch_tolerance_mode)
nc.original_time_idx = time_idx
nc.original_bus_idx = bus_idx
nc.original_branch_idx = br_idx
nc.original_tr_idx = tr_idx
nc.original_gen_idx = gen_idx
nc.original_bat_idx = batt_idx
# bus ----------------------------------------------------------------------------------------------------------
nc.bus_names = self.bus_names[bus_idx]
nc.bus_types = self.bus_types[bus_idx]
nc.bus_installed_power = self.bus_installed_power[bus_idx]
nc.bus_active = self.bus_active[np.ix_(time_idx, bus_idx)]
nc.Vbus = self.Vbus[np.ix_(time_idx, bus_idx)]
# branch common ------------------------------------------------------------------------------------------------
nc.branch_names = self.branch_names[br_idx]
nc.branch_active = self.branch_active[np.ix_(time_idx, br_idx)]
nc.branch_rates = self.branch_rates[np.ix_(time_idx, br_idx)]
nc.branch_cost = self.branch_cost[np.ix_(time_idx, br_idx)]
nc.branch_R = self.branch_R[br_idx]
nc.branch_X = self.branch_X[br_idx]
nc.F = self.F[br_idx]
nc.T = self.T[br_idx]
nc.C_branch_bus_f = self.C_branch_bus_f[np.ix_(br_idx, bus_idx)]
nc.C_branch_bus_t = self.C_branch_bus_t[np.ix_(br_idx, bus_idx)]
# lines --------------------------------------------------------------------------------------------------------
nc.line_names = self.line_names[line_idx]
nc.line_R = self.line_R[line_idx]
nc.line_X = self.line_X[line_idx]
nc.line_B = self.line_B[line_idx]
nc.line_temp_base = self.line_temp_base[line_idx]
nc.line_temp_oper = self.line_temp_oper[line_idx]
nc.line_alpha = self.line_alpha[line_idx]
nc.line_impedance_tolerance = self.line_impedance_tolerance[line_idx]
nc.C_line_bus = self.C_line_bus[np.ix_(line_idx, bus_idx)]
# transformer 2W + 3W ------------------------------------------------------------------------------------------
nc.tr_names = self.tr_names[tr_idx]
nc.tr_R = self.tr_R[tr_idx]
nc.tr_X = self.tr_X[tr_idx]
nc.tr_G = self.tr_G[tr_idx]
nc.tr_B = self.tr_B[tr_idx]
nc.tr_tap_f = self.tr_tap_f[tr_idx]
nc.tr_tap_t = self.tr_tap_t[tr_idx]
nc.tr_tap_mod = self.tr_tap_mod[tr_idx]
nc.tr_tap_ang = self.tr_tap_ang[tr_idx]
nc.C_tr_bus = self.C_tr_bus[np.ix_(tr_idx, bus_idx)]
# hvdc line ----------------------------------------------------------------------------------------------------
nc.hvdc_names = self.hvdc_names[hvdc_idx]
nc.hvdc_active = self.hvdc_active[np.ix_(time_idx, hvdc_idx)]
nc.hvdc_rate = self.hvdc_rate[np.ix_(time_idx, hvdc_idx)]
nc.hvdc_Pf = self.hvdc_Pf[np.ix_(time_idx, hvdc_idx)]
nc.hvdc_Pt = self.hvdc_Pt[np.ix_(time_idx, hvdc_idx)]
nc.C_hvdc_bus_f = self.C_hvdc_bus_f[np.ix_(hvdc_idx, bus_idx)]
nc.C_hvdc_bus_t = self.C_hvdc_bus_t[np.ix_(hvdc_idx, bus_idx)]
# vsc converter ------------------------------------------------------------------------------------------------
nc.vsc_names = self.vsc_names[vsc_idx]
nc.vsc_R1 = self.vsc_R1[vsc_idx]
nc.vsc_X1 = self.vsc_X1[vsc_idx]
nc.vsc_Gsw = self.vsc_Gsw[vsc_idx]
nc.vsc_Beq = self.vsc_Beq[vsc_idx]
nc.vsc_m = self.vsc_m[vsc_idx]
nc.vsc_theta = self.vsc_theta[vsc_idx]
nc.C_vsc_bus = self.C_vsc_bus[np.ix_(vsc_idx, bus_idx)]
# load ---------------------------------------------------------------------------------------------------------
nc.load_names = self.load_names[load_idx]
nc.load_active = self.load_active[np.ix_(time_idx, load_idx)]
        nc.load_s = self.load_s[np.ix_(time_idx, load_idx)]
from functools import lru_cache
from random import randint
from imageio import imread
import numpy as np
import cv2
import torch
import h5py
class CachedImageReader():
@staticmethod
@lru_cache(maxsize=6)
def _read(path):
return imread(path)
def __init__(self, keys=['img','mask','depth']):
self.keys = keys
def __call__(self, sample):
for k in self.keys:
sample[k] = self._read(sample[k])
return sample
def OtherHandMasker():
def f(sample):
box_other = sample['box_other']
box_own = sample['box_own']
mask = np.ones_like(sample['img'][:,:,0])
mask[
box_other[1]:box_other[3],
box_other[0]:box_other[2]
] = 0
# making sure current hand is not obscured
mask[
box_own[1]:box_own[3],
box_own[0]:box_own[2]
] = 1
sample['img'] = sample['img'] * mask[:,:,None]
return sample
return f
def DepthDecoder():
""" Converts a RGB-coded depth into float valued depth. """
def f(sample):
encoded = sample['depth']
top_bits, bottom_bits = encoded[:,:,0], encoded[:,:,1]
depth_map = (top_bits * 2**8 + bottom_bits).astype('float32')
depth_map /= float(2**16 - 1)
depth_map *= 5.0
return depth_map
return f
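def _demo_depth_roundtrip():
    # Illustrative sketch (not part of the pipeline): round-trip the 16-bit
    # encoding that DepthDecoder inverts. A depth d in [0, 5] metres is assumed
    # to be split into two 8-bit channels (top, bottom).
    d = 3.2
    code = int(round(d / 5.0 * (2 ** 16 - 1)))    # 16-bit depth code
    top, bottom = code >> 8, code & 0xFF          # the two colour channels
    decoded = (top * 2 ** 8 + bottom) / float(2 ** 16 - 1) * 5.0
    assert abs(decoded - d) < 1e-3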
class NormalizeMeanStd(object):
def __init__(self, hmap=True):
self.hmap = hmap
def __call__(self, sample):
mean = np.r_[[0.485, 0.456, 0.406]]
std = np.r_[[0.229, 0.224, 0.225]]
sample['img'] = (sample['img'].astype('float32') / 255 - mean) / std
if self.hmap:
sample['hmap'] = sample['hmap'].astype('float32') / sample['hmap'].max()
return sample
class NormalizeMax(object):
def __init__(self, keys=['img','hmap']):
self.keys = keys
def __call__(self, sample):
for k in self.keys:
maxval = sample[k].max()
if maxval:
sample[k] = sample[k].astype('float32') / maxval
return sample
class Coords2Hmap():
def __init__(self, sigma, shape=(128,128), coords_scaling=1):
self.sigma = sigma
self.hmap_shape = shape
self.c_scale = coords_scaling
def __call__(self, sample):
hmap = np.zeros((*self.hmap_shape, 21),'float32')
coords = sample['coords']
hmap[np.clip((coords[:,0] * self.c_scale).astype('uint'), 0, self.hmap_shape[0]-1),
np.clip((coords[:,1] * self.c_scale).astype('uint'),0, self.hmap_shape[1]-1),
np.arange(21)] = 10
sample['hmap'] = cv2.GaussianBlur(hmap, (35, 35), self.sigma)
return sample
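def _demo_coords2hmap():
    # Usage sketch (sigma/shape assumed for illustration): 21 keypoints in a
    # 256x256 image become a (128, 128, 21) stack of Gaussian heatmaps, one
    # channel per keypoint.
    to_hmap = Coords2Hmap(sigma=3, shape=(128, 128), coords_scaling=0.5)
    out = to_hmap({'coords': np.random.uniform(0, 255, (21, 2))})
    assert out['hmap'].shape == (128, 128, 21)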
class AffineTransform():
def __init__(self, img_size=(256,256),
scale_min=0.8, scale_max=1.3,
translation_max=45):
self.tmax = translation_max
self.scale = (int(scale_min*10), int(scale_max*10))
self.img_size = img_size
self.center = (img_size[0]//2, img_size[1]//2)
self.crd_max = max(img_size)-1
@staticmethod
def _pad1(M):
return np.pad(
M, ((0,0),(0,1)),
mode='constant',
constant_values=1)
def __call__(self, sample):
M = cv2.getRotationMatrix2D(
self.center, randint(-90,90),
randint(*self.scale) / 10)
M[:,2:] += np.random.uniform(-self.tmax, self.tmax, (2,1))
sample['img'] = cv2.warpAffine(
sample['img'],
M, self.img_size,
borderMode=cv2.BORDER_REFLECT)
Mpad = self._pad1(M.T)
Mpad[:2,2:] = 0
crd_t = self._pad1(sample['coords']) @ Mpad
sample['coords'] = np.clip(crd_t[:,:2], 1, self.crd_max)
return sample
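# Note on AffineTransform above (illustrative): _pad1 appends a column of ones so
# the 2x3 affine matrix can be applied to the (21, 2) keypoints as a single matrix
# product in homogeneous coordinates, mirroring what cv2.warpAffine does to pixels.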
class CenterNCrop():
def __init__(self, in_shape, out_size, pad_radius=30):
self.in_shape = in_shape
self.out_size = out_size
self.pad_radius = pad_radius
@staticmethod
def _getCircle(coords):
min_ = coords.min(axis=0)
max_ = coords.max(axis=0)
center = min_ + (max_ - min_) / 2
radius = np.sqrt(((max_ - center)**2).sum())
return center, radius
@staticmethod
def circle2BB(circle, pad_radius):
cnt, rad = circle
rad = rad + pad_radius
ymin, ymax = int(cnt[0]-rad), int(cnt[0]+rad)
xmin, xmax = int(cnt[1]-rad), int(cnt[1]+rad)
return xmin, xmax, ymin, ymax
def __call__(self, sample):
"""
Input {'img': (*in_shape,3), 'coords': (21,2), *}
Output {'img': (out_size,out_size,3), 'coords': (21,2), *}
"""
img, coords = sample['img'], sample['coords']
crcl = self._getCircle(coords)
xmin, xmax, ymin, ymax = self.circle2BB(crcl, self.pad_radius)
pmin, pmax = 0, 0
if xmin < 0 or ymin < 0:
pmin = np.abs(min(xmin, ymin))
if xmax > self.in_shape[0] or ymax > self.in_shape[1]:
pmax = max(xmax - self.in_shape[0], ymax - self.in_shape[1])
sample['yx_min_max'] = np.r_[[ymin, xmin, ymax, xmax]]
img_pad = np.pad(img, ((pmin, pmax), (pmin, pmax), (0,0)), mode='wrap')
if 'mask' in sample:
mask = sample['mask']
mask_pad = np.pad(mask, ((pmin, pmax), (pmin, pmax)), mode='wrap')
xmin += pmin
ymin += pmin
xmax += pmin
ymax += pmin
img_crop = img_pad[ymin:ymax,xmin:xmax,:]
if 'mask' in sample:
mask_crop = mask_pad[ymin:ymax, xmin:xmax]
coords += np.c_[pmin, pmin].astype('uint')
rescale = self.out_size / (xmax - xmin)
img_resized = cv2.resize(img_crop, (self.out_size, self.out_size))
if 'mask' in sample:
mask_resized = cv2.resize(mask_crop, (self.out_size, self.out_size))
coords = coords - np.c_[ymin, xmin]
coords = coords*rescale
sample['img'] = img_resized
sample['coords'] = coords.round().astype('uint8')
if 'mask' in sample:
sample['mask'] = mask_resized
return sample
class CropLikeGoogle():
def __init__(self, out_size, box_enlarge=1.5, rand=False):
self.out_size = out_size
self.box_enlarge = box_enlarge
self.R90 = np.r_[[[0,1],[-1,0]]]
half = out_size // 2
self._target_triangle = np.float32([
[half, half],
[half, 0],
[ 0, half]
])
self.rand = rand
def get_triangle(self, kp0, kp2, dist=1):
"""get a triangle used to calculate Affine transformation matrix"""
dir_v = kp2 - kp0
dir_v /= np.linalg.norm(dir_v)
dir_v_r = dir_v @ self.R90.T
return np.float32([kp2, kp2+dir_v*dist, kp2 + dir_v_r*dist])
@staticmethod
def triangle_to_bbox(source):
# plain old vector arithmetics
bbox = np.c_[
[source[2] - source[0] + source[1]],
[source[1] + source[0] - source[2]],
[3 * source[0] - source[1] - source[2]],
[source[2] - source[1] + source[0]],
].reshape(-1,2)
return bbox
@staticmethod
def _pad1(x):
        return np.pad(x, ((0,0),(0,1)), constant_values=1, mode='constant')
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import math
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from pixellib.torchbackend.instance.structures.masks import BitMasks, PolygonMasks, polygons_to_bitmask
from pixellib.torchbackend.instance.structures.boxes import Boxes, BoxMode
from pixellib.torchbackend.instance.structures.instances import Instances
from pixellib.torchbackend.instance.structures.boxes import _maybe_jit_unused
from typing import List, Tuple
'''from structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
'''
from pixellib.torchbackend.instance.utils.file_io import PathManager
import pixellib.torchbackend.instance.data.transforms as T
from .catalogdata import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class RotatedBoxes(Boxes):
"""
This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx5 matrix. Each row is
(x_center, y_center, width, height, angle),
in which angle is represented in degrees.
While there's no strict range restriction for it,
the recommended principal range is between [-180, 180) degrees.
Assume we have a horizontal box B = (x_center, y_center, width, height),
where width is along the x-axis and height is along the y-axis.
The rotated box B_rot (x_center, y_center, width, height, angle)
can be seen as:
1. When angle == 0:
B_rot == B
2. When angle > 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
3. When angle < 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
Mathematically, since the right-handed coordinate system for image space
is (y, x), where y is top->down and x is left->right, the 4 vertices of the
rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
:math:`(y_c, x_c)` is the center of the rectangle):
.. math::
yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
which is the standard rigid-body rotation transformation.
Intuitively, the angle is
(1) the rotation angle from y-axis in image space
to the height vector (top->down in the box's local coordinate system)
of the box in CCW, and
(2) the rotation angle from x-axis in image space
to the width vector (left->right in the box's local coordinate system)
of the box in CCW.
More intuitively, consider the following horizontal box ABCD represented
in (x1, y1, x2, y2): (3, 2, 7, 4),
covering the [3, 7] x [2, 4] region of the continuous coordinate system
which looks like this:
.. code:: none
O--------> x
|
| A---B
| | |
| D---C
|
v y
Note that each capital letter represents one 0-dimensional geometric point
instead of a 'square pixel' here.
In the example above, using (x, y) to represent a point we have:
.. math::
O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
We name vector AB = vector DC as the width vector in box's local coordinate system, and
vector AD = vector BC as the height vector in box's local coordinate system. Initially,
when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
in the image space, respectively.
For better illustration, we denote the center of the box as E,
.. code:: none
O--------> x
|
| A---B
| | E |
| D---C
|
v y
where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
Also,
.. math::
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Therefore, the corresponding representation for the same shape in rotated box in
(x_center, y_center, width, height, angle) format is:
(5, 3, 4, 2, 0),
Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
CCW (counter-clockwise) by definition. It looks like this:
.. code:: none
O--------> x
| B-C
| | |
| |E|
| | |
| A-D
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CCW with regard to E:
A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
vector AD or vector BC (the top->down height vector in box's local coordinate system),
or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
width vector in box's local coordinate system).
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
by definition? It looks like this:
.. code:: none
O--------> x
| D-A
| | |
| |E|
| | |
| C-B
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CW with regard to E:
A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
will be 1. However, these two will generate different RoI Pooling results and
should not be treated as an identical box.
On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
(X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
equivalent to rotating the same shape 90 degrees CW.
We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
.. code:: none
O--------> x
|
| C---D
| | E |
| B---A
|
v y
.. math::
A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Finally, this is a very inaccurate (heavily quantized) illustration of
how (5, 3, 4, 2, 60) looks like in case anyone wonders:
.. code:: none
O--------> x
| B\
| / C
| /E /
| A /
| `D
v y
It's still a rectangle with center of (5, 3), width of 4 and height of 2,
but its angle (and thus orientation) is somewhere between
(5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
self.tensor = tensor
def clone(self) -> "RotatedBoxes":
"""
Clone the RotatedBoxes.
Returns:
RotatedBoxes
"""
return RotatedBoxes(self.tensor.clone())
@_maybe_jit_unused
def to(self, device: torch.device):
# Boxes are assumed float32 and does not support to(dtype)
return RotatedBoxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = box[:, 2] * box[:, 3]
return area
def normalize_angles(self) -> None:
"""
Restrict angles to the range of [-180, 180) degrees
"""
self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0
def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
For RRPN:
Only clip boxes that are almost horizontal with a tolerance of
clip_angle_threshold to maintain backward compatibility.
Rotated boxes beyond this threshold are not clipped for two reasons:
1. There are potentially multiple ways to clip a rotated box to make it
fit within the image.
2. It's tricky to make the entire rectangular box fit within the image
and still be able to not leave out pixels of interest.
Therefore we rely on ops like RoIAlignRotated to safely handle this.
Args:
box_size (height, width): The clipping box's size.
clip_angle_threshold:
Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
we do the clipping as horizontal boxes.
"""
h, w = box_size
        # normalize angles to the principal range [-180, 180) degrees
self.normalize_angles()
idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]
# convert to (x1, y1, x2, y2)
x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0
# clip
x1.clamp_(min=0, max=w)
y1.clamp_(min=0, max=h)
x2.clamp_(min=0, max=w)
y2.clamp_(min=0, max=h)
# convert back to (xc, yc, w, h)
self.tensor[idx, 0] = (x1 + x2) / 2.0
self.tensor[idx, 1] = (y1 + y2) / 2.0
# make sure widths and heights do not increase due to numerical errors
self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty, if either of its side is no larger than threshold.
Returns:
Tensor: a binary vector which represents
whether each box is empty (False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2]
heights = box[:, 3]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "RotatedBoxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box covering
[0, width] x [0, height]
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
For RRPN, it might not be necessary to call this function since it's common
for rotated box to extend to outside of the image boundaries
(the clip function only clips the near-horizontal boxes)
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
cnt_x = self.tensor[..., 0]
cnt_y = self.tensor[..., 1]
half_w = self.tensor[..., 2] / 2.0
half_h = self.tensor[..., 3] / 2.0
a = self.tensor[..., 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
max_rect_dx = c * half_w + s * half_h
max_rect_dy = c * half_h + s * half_w
inds_inside = (
(cnt_x - max_rect_dx >= -boundary_threshold)
& (cnt_y - max_rect_dy >= -boundary_threshold)
& (cnt_x + max_rect_dx < width + boundary_threshold)
& (cnt_y + max_rect_dy < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return self.tensor[:, :2]
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the rotated box with horizontal and vertical scaling factors
Note: when scale_factor_x != scale_factor_y,
the rotated box does not preserve the rectangular shape when the angle
is not a multiple of 90 degrees under resize transformation.
Instead, the shape is a parallelogram (that has skew)
Here we make an approximation by fitting a rotated rectangle to the parallelogram.
"""
self.tensor[:, 0] *= scale_x
self.tensor[:, 1] *= scale_y
theta = self.tensor[:, 4] * math.pi / 180.0
c = torch.cos(theta)
s = torch.sin(theta)
# In image space, y is top->down and x is left->right
        # Consider the local coordinate system for the rotated box,
# where the box center is located at (0, 0), and the four vertices ABCD are
# A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
# the midpoint of the left edge AD of the rotated box E is:
# E = (A+D)/2 = (-w / 2, 0)
# the midpoint of the top edge AB of the rotated box F is:
# F(0, -h / 2)
# To get the old coordinates in the global system, apply the rotation transformation
# (Note: the right-handed coordinate system for image space is yOx):
# (old_x, old_y) = (s * y + c * x, c * y - s * x)
# E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
# F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
# After applying the scaling factor (sfx, sfy):
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
# F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
        # The new width after the scaling transformation becomes:
# w(new) = |E(new) - O| * 2
# = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
# = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
# i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)
# h(new) = |F(new) - O| * 2
# = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
# = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
# i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)
# The angle is the rotation angle from y-axis in image space to the height
# vector (top->down in the box's local coordinate system) of the box in CCW.
#
# angle(new) = angle_yOx(O - F(new))
# = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
# = atan2(sfx * s * h / 2, sfy * c * h / 2)
# = atan2(sfx * s, sfy * c)
#
# For example,
# when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi
@classmethod
@_maybe_jit_unused
def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
"""
Concatenates a list of RotatedBoxes into a single RotatedBoxes
Arguments:
boxes_list (list[RotatedBoxes])
Returns:
RotatedBoxes: the concatenated RotatedBoxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, RotatedBoxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (5,) at a time.
"""
yield from self.tensor
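def _demo_rotated_boxes():
    # Minimal sketch (illustrative): the (5, 3, 4, 2, 90) box discussed in the
    # docstring covers the same region as (5, 3, 4, 2, -90); normalize_angles()
    # maps 270 deg to the principal range and area() returns w * h.
    boxes = RotatedBoxes(torch.tensor([[5.0, 3.0, 4.0, 2.0, 90.0],
                                       [5.0, 3.0, 4.0, 2.0, 270.0]]))
    boxes.normalize_angles()
    assert boxes.tensor[1, 4].item() == -90.0
    assert torch.allclose(boxes.area(), torch.tensor([8.0, 8.0]))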
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor:
"""
Given two lists of rotated boxes of size N and M,
compute the IoU (intersection over union)
between **all** N x M pairs of boxes.
The box order must be (x_center, y_center, width, height, angle).
Args:
boxes1, boxes2 (RotatedBoxes):
two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
class SizeMismatchError(ValueError):
"""
When loaded image has difference width/height compared with annotation.
"""
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274 # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
def convert_image_to_rgb(image, format):
"""
Convert an image from given format to RGB.
Args:
image (np.ndarray or Tensor): an HWC image
format (str): the format of input image, also see `read_image`
Returns:
(np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
"""
if isinstance(image, torch.Tensor):
image = image.cpu().numpy()
if format == "BGR":
image = image[:, :, [2, 1, 0]]
elif format == "YUV-BT.601":
        # inverse of the RGB->YUV conversion above, scaled back to the 0-255 range
        image = np.dot(image, np.array(_M_YUV2RGB).T) * 255.0
    return image
from deep_rl.utils.misc import random_seed
import numpy as np
import pickle
from generative_playground.utils.gpu_utils import get_gpu_memory_map
def run_iterations(agent, visdom = None, invalid_value = None):
if visdom is not None:
have_visdom = True
random_seed()
config = agent.config
agent_name = agent.__class__.__name__
iteration = 0
steps = []
rewards = []
sm_metrics = np.array([[invalid_value,invalid_value,invalid_value]])
while True:
agent.iteration()
steps.append(agent.total_steps)
rewards.append(np.mean(agent.last_episode_rewards))
if iteration % config.iteration_log_interval == 0:
config.logger.info('total steps %d, mean/max/min reward %f/%f/%f' % (
                agent.total_steps, np.mean(agent.last_episode_rewards),
                np.max(agent.last_episode_rewards), np.min(agent.last_episode_rewards)))
import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import math
import pickle
from collections import deque
import tensorflow as tf
import matplotlib.pyplot as plt
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
from threading import Thread
from tqdm import tqdm
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
from carla import ColorConverter as cc
SHOW_PREVIEW = True
IM_WIDTH = 360
IM_HEIGHT = 240
SECONDS_PER_EPISODE = 30
REPLAY_MEMORY_SIZE = 20_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.4
MIN_REWARD = -200
EPISODES = 40_000
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.99995 #0.95 #0.99975
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 10
PREDICTED_ANGLES = [-1, 0, 1]
class CarEnv:
SHOW_CAM = SHOW_PREVIEW
im_width = IM_WIDTH
im_height = IM_HEIGHT
front_camera = None
def __init__(self):
self.client = carla.Client("127.0.0.1", 2000)
self.client.set_timeout(10.0)
self.world = self.client.get_world()
self.world.constant_velocity_enabled=True
self.blueprint_library = self.world.get_blueprint_library()
self.model_3 = self.blueprint_library.filter("model3")[0]
def restart(self):
self.collision_hist = deque(maxlen=2000)
self.actor_list = deque(maxlen=2000)
self.lane_invasion = deque(maxlen=2000)
self.obstacle_distance = deque(maxlen=2000)
self.transform = random.choice(self.world.get_map().get_spawn_points())
self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
self.actor_list.append(self.vehicle)
# self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
# self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
# self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
# self.rgb_cam.set_attribute("fov", f"110")
self.ss_cam = self.blueprint_library.find('sensor.camera.semantic_segmentation')
self.ss_cam.set_attribute("image_size_x", f"{self.im_width}")
self.ss_cam.set_attribute("image_size_y", f"{self.im_height}")
self.ss_cam.set_attribute("fov", f"110")
transform = carla.Transform(carla.Location(x=2.5, z=0.7))
self.sensor = self.world.spawn_actor(self.ss_cam, transform, attach_to=self.vehicle)
self.actor_list.append(self.sensor)
self.sensor.listen(lambda data: self.process_img(data))
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
self.vehicle.enable_constant_velocity(carla.Vector3D(11, 0, 0))
time.sleep(4)
col_sensor = self.blueprint_library.find("sensor.other.collision")
self.col_sensor = self.world.spawn_actor(col_sensor, transform, attach_to=self.vehicle)
self.actor_list.append(self.col_sensor)
self.col_sensor.listen(lambda event: self.collision_data(event))
lane_sensor = self.blueprint_library.find("sensor.other.lane_invasion")
self.lane_sensor = self.world.spawn_actor(lane_sensor, transform, attach_to=self.vehicle)
self.actor_list.append(self.lane_sensor)
self.lane_sensor.listen(lambda lanesen: self.lane_data(lanesen))
obs_sensor = self.blueprint_library.find("sensor.other.radar")
self.obs_sensor = self.world.spawn_actor(
obs_sensor,
carla.Transform(
carla.Location(x=2.8, z=1.0),
carla.Rotation(pitch=5)),
attach_to=self.vehicle)
self.actor_list.append(self.obs_sensor)
self.obs_sensor.listen(lambda obsen: self.obs_data(obsen))
while self.front_camera is None:
time.sleep(0.01)
self.episode_start = time.time()
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
return self.front_camera
def collision_data(self, event):
self.collision_hist.append(event)
def lane_data(self, lanesen):
lane_types = set(x.type for x in lanesen.crossed_lane_markings)
text = ['%r' % str(x).split()[-1] for x in lane_types]
self.lane_invasion.append(text[0])
def obs_data(self, obsen):
for detect in obsen:
if detect.depth <= float(30.0):
self.obstacle_distance.append(detect.depth)
def process_img(self, image):
image.convert(cc.CityScapesPalette)
i = np.array(image.raw_data, dtype='uint8')
i2 = i.reshape((self.im_height, self.im_width, 4))
i3 = i2[:, :, :3]
if self.SHOW_CAM:
cv2.imshow("test", i3)
cv2.waitKey(1)
self.front_camera = i3
def get_rewards(self,angle):
done = False
# Assign Reward for Collision
reward_collision = 0
if len(self.collision_hist) > 0:
reward_collision = -6
done = True
#Crossing lanes
reward_lane = 0
if len(self.lane_invasion) > 0:
lane_invasion_error = self.lane_invasion.pop()
#print(lane_invasion_error)
if 'Broken' in lane_invasion_error:
reward_lane += -2
elif 'Solid' in lane_invasion_error:
reward_lane += -4
#Assign reward for obstacle distance
reward_obs = 0
# distance = 40
# if len(self.obstacle_distance) > 0 :
# distance = self.obstacle_distance.pop()
# reward_obs = int(-40 + distance) if (distance < 10) else int(distance)
# Assign reward for steering angle
# reward_steer = 0
# if abs(angle) > 0.2:
# reward_steer = -0.6
total_reward = reward_collision + reward_lane
return total_reward, done
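    # Worked example for get_rewards above (illustrative): a collision (-6) that
    # also crossed a 'Solid' lane marking (-4) scores -10 here; step() then adds
    # the constant +1 speed reward, giving a net -9 for that step.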
def step(self, actionIndex):
angle = PREDICTED_ANGLES[actionIndex]
self.vehicle.apply_control(carla.VehicleControl(steer=angle))
v = self.vehicle.get_velocity()
#self.vehicle.set_target_velocity(v)
kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
speed_reward = 1
total_rewards, done = self.get_rewards(angle)
total_rewards+=speed_reward
if self.episode_start + SECONDS_PER_EPISODE < time.time():
done = True
return self.front_camera, total_rewards, done, None
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size # ()
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_SIZE)
self.training_initialized = False
self.gamma = 0.9 # discount rate
self.loss_list = deque(maxlen=20000)
self.epsilon = 1 # exploration rate
self.epsilon_min = MIN_EPSILON #0.01
self.epsilon_decay = EPSILON_DECAY #0.99995 # changed to 0.95
self.learning_rate = 0.001
self.model = self._build_model()
self.target_model = self._build_model()
self.episode_number = 0
self.terminate = False
# @jit
def _build_model(self):
base_model = tf.keras.applications.ResNet50(weights='imagenet', include_top=False, input_shape=(240, 360, 3))
base_model.trainable = False
# Additional Linear Layers
inputs = tf.keras.Input(shape=(240, 360, 3))
x = base_model(inputs, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(units=40, activation='relu')(x)
output = tf.keras.layers.Dense(units=3, activation='linear')(x)
# Compile the Model
model = tf.keras.Model(inputs, output)
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate))
        print(model.summary())
return model
def memorize(self, state, action, reward, next_state, done):
transition = (state, action, reward, next_state, done)
self.memory.append(transition)
# with open('_out/memory_list.txt', "wb") as myfile:
# myfile.write(str(transition).rstrip('\n') +" ")
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
def act(self, state):
# randomly select action
if np.random.rand() <= self.epsilon:
return (random.randrange(self.action_size),True)
# use NN to predict action
state = np.expand_dims(state, axis=0)
act_values = self.model.predict(state)
return (np.argmax(act_values[0]), False)
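    # Note on act() above (illustrative): with EPSILON_DECAY = 0.99995 applied once
    # per step, epsilon falls from 1.0 to about 0.37 after ~20,000 steps
    # (0.99995**20000 ~ e**-1), so early training is dominated by random exploration.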
def replay(self):
with tf.device('/gpu:0'):
if len(self.memory) < MIN_REPLAY_MEMORY_SIZE:
return
minibatch = random.sample(self.memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])/255
current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
X = []
y = []
for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
X.append(current_state)
y.append(current_qs)
            # minimal completion (the exact fit arguments are assumed; inputs are
            # scaled by 255 to mirror current_states above)
            history = self.model.fit(np.array(X) / 255, np.array(y),
                                     batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False)
import datetime
import json
import numpy as np
import os
import pandas as pd
from . import util
from .util import minute, lin_interp, cos_deg, sin_deg
from .base_model import BaseModel
class DualTrackModel(BaseModel):
delta = None
time_points = None
time_point_delta = None
window = None
data_source_lbl = None
data_target_lbl = None
data_undefined_vals = None
data_defined_vals = None
data_true_vals = None
data_false_vals = None
data_far_time = None
@classmethod
def create_features_and_times(cls, data1, data2, angle=77, max_deltas=0):
#TODO: take two data items and create using recipe from
# load paired / cook paired
t, x, y_tv, label, is_defined = cls.build_features(data1,
skip_label=True)
t_fv, x_fv, y_fv, _, _ = cls.build_features(data2, interp_t = t,
skip_label=True)
data = (t, x, y_tv, x_fv, y_fv, label, is_defined)
min_ndx = 0
max_ndx = len(y_tv) - cls.time_points
features = []
times = []
i0 = 0
while i0 < max_ndx:
i1 = min(i0 + cls.time_points + max_deltas * cls.time_point_delta, len(y_tv))
_, f_chunk = cls.cook_paired_data(*data, noise=0,
start_ndx=i0, end_ndx=i1)
features.append(f_chunk)
i0 = i0 + max_deltas * cls.time_point_delta + 1
times = t[cls.time_points//2:-cls.time_points//2]
return features, times
@classmethod
def build_features(cls, obj, skip_label=False, keep_frac=1.0, interp_t=None):
n_pts = len(obj['timestamp'])
assert 0 < keep_frac <= 1
if keep_frac == 1:
mask = None
else:
# Build a random mask with probability keep_frac. Force
# first and last point to be true so the time frame
# stays the same.
mask = np.random.uniform(0, 1, size=[n_pts]) < keep_frac
mask[0] = mask[-1] = True
delta = None if (interp_t is not None) else cls.delta
assert np.isnan(obj['speed']).sum() == np.isnan(obj['course']).sum() == 0
v = np.array(obj['speed'])
# Replace missing speeds with arbitrary 3.5 (between setting and hauling)
# TODO: use implied speed instead
v[np.isnan(v)] = 3.5
obj['speed'] = v
xi, speeds = lin_interp(obj, 'speed', delta=delta, t=interp_t, mask=mask)
y0 = speeds
#
_, cos_yi = lin_interp(obj, 'course', delta=delta, t=interp_t, mask=mask, func=cos_deg)
_, sin_yi = lin_interp(obj, 'course', delta=delta, t=interp_t, mask=mask, func=sin_deg)
angle_i = np.arctan2(sin_yi, cos_yi)
y1 = angle_i
#
_, y2 = lin_interp(obj, 'lat', delta=delta, t=interp_t, mask=mask)
# Longitude can cross the dateline, so interpolate useing cos / sin
_, cos_yi = lin_interp(obj, 'lon', delta=delta, t=interp_t, mask=mask, func=cos_deg)
_, sin_yi = lin_interp(obj, 'lon', delta=delta, t=interp_t, mask=mask, func=sin_deg)
        y3 = np.degrees(np.arctan2(sin_yi, cos_yi))
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
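def _demo_friedel_pair():
    # Illustrative sketch: in P -1 every reflection (h, k, l) has the Friedel
    # mate (-h, -k, -l) as its symmetry equivalent, with unit phase factors
    # because all translations are zero.
    hkls, p = space_groups['P -1'].symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    assert (hkls[1] == N.array([-1, -2, -3])).all()
    assert N.allclose(p, 1.0)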
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
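# A minimal usage sketch (an addition to the generated table, hedged):
# at this point `transformations` still holds the P 21 21 21 operations,
# so each one can be applied to a fractional coordinate directly.  This
# assumes the array module bound to N earlier in this file provides
# dot() and elementwise % on arrays, as both Numeric and numpy do.
_x = N.array([0.1, 0.2, 0.3])
for _rot, _num, _den in transformations:
    # image of x: rot*x + trans_num/trans_den, reduced into the unit cell
    _image = (N.dot(_rot, _x) + _num * 1.0 / _den) % 1.0
del _x, _rot, _num, _den, _image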
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
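# F-centred groups such as F 2 2 2 repeat each point operation four
# times, once per lattice point, with the centring translations
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) added in turn.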
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
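# Body-centred (I) groups add a single extra lattice point at
# (1/2,1/2,1/2), so each point operation appears twice.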
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
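# Translations are stored unreduced: (1/2,1/2,1) in I 21 21 21 above
# equals (1/2,1/2,0) modulo a lattice translation.  The groups from
# P m m 2 (25) through I m a 2 (46) below all share the same four mm2
# rotation parts and differ only in their glide/mirror translations and
# lattice centring.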
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
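# A-centred groups (A m m 2 above through A b a 2 below) use the
# centring translation (0,1/2,1/2).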
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
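# The diamond-glide (d) operations of F d d 2 are the ones with
# quarter-cell translations, i.e. the denominators of 4 above.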
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
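# A hedged sanity-check sketch (an addition, not part of the generated
# table): the eight rotation parts of P m m m just stored are the
# diagonal +/-1 matrices, so they must be closed under composition.
# `transformations` still refers to the P m m m list here; N is assumed
# to provide ravel() and dot(), as both Numeric and numpy do.
_rot_set = set(tuple(N.ravel(op[0])) for op in transformations)
for _a in transformations:
    for _b in transformations:
        # the product of any two rotations must itself be in the group
        assert tuple(N.ravel(N.dot(_a[0], _b[0]))) in _rot_set
del _rot_set, _a, _b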
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
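# The ':2' suffix in 'P n n n :2' denotes origin choice 2 (origin at a
# centre of inversion); the same convention is used for P b a n :2
# below.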
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
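
# In every primitive block above (space groups 53-62) the eight operations
# come in inversion-related pairs: operation k+4 is (-R, -t) of operation k,
# with the translation left unreduced (hence negative entries such as
# (-1/2, 0, -1/2)).  The ':2' suffix on names such as 'P m m n :2' marks
# the second origin choice of the International Tables.  A sketch of the
# pairing check, illustrative only:
def _is_inversion_image(op_a, op_b):
    """True if op_b is (-R, -t) relative to op_a."""
    rot_a, num_a, den_a = op_a
    rot_b, num_b, den_b = op_b
    same_rot = N.sum(N.ravel(rot_a + rot_b) ** 2) == 0
    t_a = [float(n) / d for n, d in zip(num_a, den_a)]
    t_b = [float(n) / d for n, d in zip(num_b, den_b)]
    return same_rot and t_a == [-x for x in t_b]
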
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
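
# The C-centred groups (63-68) list each of the eight basic operations
# twice: once as-is and once with the centring translation (1/2, 1/2, 0)
# added.  The sums are left unreduced, which is why entries such as
# (1, 1/2, 1/2) appear in the 'C m c a' block below.  A sketch that folds
# any of these translations back into the [0, 1) unit cell, illustrative
# only:
def _reduced_translation(trans_num, trans_den):
    """Map an unreduced translation, e.g. (1, 1/2, 1/2) or (-1/2, 0, -1/2),
    to its equivalent inside the unit cell."""
    return [(float(n) / d) % 1.0 for n, d in zip(trans_num, trans_den)]
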
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
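
# 'F m m m' above is face-centred: the eight basic operations are repeated
# with each of the three centring translations (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0), giving 8 x 4 = 32 entries; the body-centred groups that
# follow add the single centring (1/2,1/2,1/2) for 16 entries.  A sketch of
# the corresponding count, assuming SpaceGroup keeps the list it was built
# from in a 'transformations' attribute (an assumption, not verified here):
def _operation_count(name):
    """Illustrative only: number of listed operations for a group."""
    return len(space_groups[name].transformations)
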
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
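
# For the body-centred blocks such as 'I m m m' above, the centring vector
# can be read back from the data itself: operation 9 (index 8) is the
# identity rotation carrying the pure centring translation (1/2, 1/2, 1/2).
# A sketch, illustrative only:
def _centering_shift(ops):
    """Translation of the first operation of the second (centred) copy in a
    16-operation I-centred block."""
    _rot, num, den = ops[8]
    return [float(n) / d for n, d in zip(num, den)]
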
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
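
# 'P 4' opens the tetragonal groups: the rotation parts are no longer
# diagonal.  N.array([0,-1,0,1,0,0,0,0,1]) reshaped to 3x3 is the 4+
# rotation about z, sending fractional (x, y, z) to (-y, x, z); its
# transpose [0,1,0,-1,0,0,0,0,1] is the inverse 4- rotation.  A sketch,
# illustrative only:
def _fourfold_image(point):
    """Apply the 4+ rotation used throughout the tetragonal blocks."""
    rot = N.array([0,-1,0,1,0,0,0,0,1])
    rot.shape = (3, 3)
    return N.dot(rot, point)  # (x, y, z) -> (-y, x, z)
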
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
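
# 'P 41', 'P 42' and 'P 43' above share their rotation parts with 'P 4';
# they differ only in the screw translation attached to the 4+ operation:
# c/4, c/2 and 3c/4 respectively, as the trans_num/trans_den pairs show.
# A sketch that reads that component back out, illustrative only:
def _screw_component(ops):
    """z-translation of the 4+ operation (second entry in the tetragonal
    blocks above): 0 for 'P 4'; 1/4, 1/2, 3/4 for 'P 41', 'P 42', 'P 43'."""
    _rot, num, den = ops[1]
    return float(num[2]) / den[2]
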
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
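
# 'P -4' above is generated by a rotoinversion, not a rotation: the part
# N.array([0,1,0,-1,0,0,0,0,-1]) sends (x, y, z) to (y, -x, -z) and has
# determinant -1, whereas every operation in 'P 4' through 'I 41' is proper
# (determinant +1).  A sketch of the determinant test, illustrative only:
def _det3(rot):
    """Determinant of a 3x3 rotation part: -1 marks an improper operation."""
    m = N.ravel(rot)
    return (m[0] * (m[4] * m[8] - m[5] * m[7])
            - m[1] * (m[3] * m[8] - m[5] * m[6])
            + m[2] * (m[3] * m[7] - m[4] * m[6]))
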
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
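# Note: the ':2' suffix in symbols such as 'P 4/n :2' marks International
# Tables origin choice 2 (origin placed on an inversion centre), which is
# why the fourfold operations above carry extra fractional shifts while the
# inversion itself has translation zero.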
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
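# Translations are kept as separate integer numerator/denominator arrays
# rather than floats, so unreduced components such as 5/4 above remain
# exact and can be wrapped back into the unit cell without rounding error.
# A sketch of that reduction using only the standard library (the helper
# name is hypothetical):
def _reduce_translation_sketch(trans_num, trans_den):
    """Illustration only: bring each t_i = num_i/den_i into [0, 1)."""
    from fractions import Fraction
    return [Fraction(int(n), int(d)) % 1 for n, d in zip(trans_num, trans_den)]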
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
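# Space group 116 (P -4 c 2)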
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
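# Space group 117 (P -4 b 2)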
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
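# Space group 118 (P -4 n 2)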
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
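# Space group 119 (I -4 m 2). Body-centred (I) groups list each operation
# twice: the second half of the block repeats the same rotations, in the
# same order, with the centring translation (1/2, 1/2, 1/2) added.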
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
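# Space group 120 (I -4 c 2). Note that the centring sums are stored
# unreduced: (0, 0, 1/2) + (1/2, 1/2, 1/2) appears below as
# trans_num = [1,1,1], trans_den = [2,2,1], i.e. (1/2, 1/2, 1), rather
# than being wrapped back into the unit cell.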
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
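# Space group 121 (I -4 2 m)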
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
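# Space group 122 (I -4 2 d)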
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
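# Space group 123 (P 4/m m m)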
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
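# Space group 124 (P 4/m c c)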
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
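# Space group 125 (P 4/n b m :2). The ':2' suffix in the lookup key
# follows the International Tables convention for the second origin
# choice (origin at a centre of inversion).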
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
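# Space group 126 (P 4/n n c :2)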
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
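# Space group 127 (P 4/m b m)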
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
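# Space group 128 (P 4/m n c)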
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
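# Space group 129 (P 4/n m m :2)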
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
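# Space group 130 (P 4/n c c :2)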
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
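# Space group 131 (P 42/m m c)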
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
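# Space group 132 (P 42/m c m)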
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
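# Space group 133 (P 42/n b c :2)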
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
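# Space group 134 (P 42/n n m :2)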
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
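# Space group 135 (P 42/m b c)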
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
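# Space group 136 (P 42/m n m)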
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
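# Space group 137 (P 42/n m c :2)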
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
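# Usage sketch (illustrative, not generated output): every group is
# registered twice, once under its International Tables number and once
# under its Hermann-Mauguin symbol, so either key retrieves the same
# SpaceGroup instance:
#
#     space_groups[139] is space_groups['I 4/m m m']   # -> True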
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
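# Note: in the body-centred ('I ...') groups the second half of each
# operation list is the first half shifted by the centering translation
# (1/2, 1/2, 1/2). The sums are not reduced modulo 1, which is why
# entries such as (1/2, 1/2, 1) or (3/4, 5/4, 3/4) appear; they are
# equivalent to their mod-1 counterparts within the unit cell.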
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
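# Note: the rhombohedral groups in the hexagonal setting (':H') list each
# block of operations three times, offset by the R-centering translations
# (0, 0, 0), (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3), as in 'R 3 :H' above.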
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
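# The ':H' entries (R 3 c :H above, R -3 m :H and R -3 c :H below) are
# rhombohedral groups in the hexagonal setting: each point operation occurs
# three times, combined with the R-centring translations (0, 0, 0),
# (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3).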
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
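# Groups 169-173 (P 61, P 65, P 62, P 64, P 63) replace the pure sixfold
# axis of P 6 with screw axes: each rotation carries a fractional
# translation along c in multiples of 1/6, encoded below via
# trans_num / trans_den.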
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
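# Group 174 (P -6) is generated by the sixfold roto-inversion (equivalent
# to 3/m): the threefold rotations about c together with their products
# with the horizontal mirror z -> -z.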
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
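# Groups 177-182 (the P 6 2 2 family) add twofold axes perpendicular to
# the sixfold (or screw) axis, doubling each block below to twelve
# operations.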
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
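# Groups 183-186 (P 6 m m, P 6 c c, P 63 c m, P 63 m c) combine the
# sixfold axis with vertical mirror or glide planes; the c-glide
# operations carry a translation of 1/2 along c.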
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
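# Groups 187-190 (P -6 m 2, P -6 c 2, P -6 2 m, P -6 2 c) combine the -6
# roto-inversion with vertical mirrors (or c-glides) and horizontal twofold
# axes; the 'm 2' and '2 m' symbol orders correspond to the two possible
# orientations of those elements relative to the hexagonal a axes.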
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
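# Pattern note (inferred from the listings above): centered cubic groups
# repeat the same set of rotations with added centering translations.
# 'F 2 3' (196) lists each rotation four times, with offsets (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0); 'I 2 3' (197) lists each
# rotation twice, with offsets (0,0,0) and (1/2,1/2,1/2).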
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
import photutils
from astropy.io import fits, ascii
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
import os
from pkg_resources import resource_filename
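# Fall back to a non-interactive matplotlib backend when no display is
# available (e.g. batch jobs or remote servers)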
if 'DISPLAY' not in os.environ:
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import gridspec
import glob
from photutils import CircularAperture, CircularAnnulus
from photutils import RectangularAperture
from photutils import aperture_photometry
if photutils.__version__ > "1.0":
from . import fit_2dgauss
from photutils.centroids import centroid_2dg
else:
from photutils import centroid_2dg
import numpy as np
from astropy.time import Time
import astropy.units as u
import pdb
from copy import deepcopy
import yaml
import warnings
from scipy.stats import binned_statistic
from astropy.table import Table
import multiprocessing
from multiprocessing import Pool
import time
import logging
import urllib
import tqdm
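# Conservative default for multiprocessing pools: use roughly one third of
# the available CPU cores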
maxCPUs = multiprocessing.cpu_count() // 3
try:
import bokeh.plotting
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.models import Range1d
from bokeh.models import WheelZoomTool
except ImportError as err2:
print("Could not import bokeh plotting. Interactive plotting may not work")
from .utils import robust_poly, robust_statistics
from .utils import get_baseDir
from .instrument_specific import rowamp_sub
def run_one_phot_method(allInput):
"""
Do a photometry/spectroscopy method on one file
For example, do aperture photometry on one file
This is a slightly awkward workaround: multiprocessing cannot pickle bound
methods, so this standalone function takes the object and runs the named
method on it
Parameters
-----------
allInput: 3 part tuple (object, int, string)
This contains the object, file index to run (0-based) and name of the method to run
"""
photObj, ind, method = allInput
photMethod = getattr(photObj,method)
return photMethod(ind)
def run_multiprocessing_phot(photObj,fileIndices,method='phot_for_one_file'):
"""
Run photometry/spectroscopy methods on all files using multiprocessing
Awkward workaround because multiprocessing cannot pickle object methods
Parameters
----------
photObj: Photometry object
A photometry Object instance
fileIndices: list
List of file indices
method: str
Method on which to apply multiprocessing
"""
allInput = []
for oneInd in fileIndices:
allInput.append([photObj,oneInd,method])
n_files = len(fileIndices)
if n_files < maxCPUs:
raise Exception("Fewer files to process than CPUs, this can confuse multiprocessing")
p = Pool(maxCPUs)
outputDat = list(tqdm.tqdm(p.imap(run_one_phot_method,allInput),total=n_files))
p.close()
return outputDat
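# Illustrative usage sketch (the parameter file name below is hypothetical,
# not part of the package):
#
#     phot_obj = phot('my_phot_params.yaml')
#     results = run_multiprocessing_phot(phot_obj, np.arange(phot_obj.nImg),
#                                        method='phot_for_one_file')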
def read_yaml(filePath):
with open(filePath) as yamlFile:
yamlStructure = yaml.safe_load(yamlFile)
return yamlStructure
path_to_example = "parameters/phot_params/example_phot_parameters.yaml"
exampleParamPath = resource_filename('tshirt',path_to_example)
class phot:
def __init__(self,paramFile=exampleParamPath,
directParam=None):
""" Photometry class
Parameters
------
paramFile: str
Location of the YAML file that contains the photometry parameters as long
as directParam is None. Otherwise, it uses directParam
directParam: dict
Rather than use the paramFile, you can put a dictionary here.
This can be useful for running a batch of photometric extractions.
Properties
-------
paramFile: str
Same as paramFile above
param: dict
The photometry parameters like file names, aperture sizes, guess locations
fileL: list
The files on which photometry will be performed
nImg: int
Number of images in the sequence
directParam: dict
Parameter dictionary rather than YAML file (useful for batch processing)
"""
self.pipeType = 'photometry'
self.get_parameters(paramFile=paramFile,directParam=directParam)
defaultParams = {'srcGeometry': 'Circular', 'bkgSub': True, 'isCube': False, 'cubePlane': 0,
'doCentering': True, 'bkgGeometry': 'CircularAnnulus',
'boxFindSize': 18,'backStart': 9, 'backEnd': 12,
'scaleAperture': False, 'apScale': 2.5, 'apRange': [0.01,9999],
'scaleBackground': False,
'nanTreatment': 'zero', 'backOffset': [0.0,0.0],
'srcName': 'WASP 62','srcNameShort': 'wasp62',
'refStarPos': [[50,50]],'procFiles': '*.fits',
'apRadius': 9,'FITSextension': 0,
'jdRef': 2458868,
'nightName': 'UT2020-01-20',
'FITSextension': 0, 'HEADextension': 0,
'refPhotCentering': None,'isSlope': False,
'itimeKeyword': 'INTTIME','readNoise': None,
'detectorGain': None,'cornerSubarray': False,
'subpixelMethod': 'exact','excludeList': None,
'dateFormat': 'Two Part','copyCentroidFile': None,
'bkgMethod': 'mean','diagnosticMode': False,
'bkgOrderX': 1, 'bkgOrderY': 1,'backsub_directions': ['Y','X'],
'readFromTshirtExamples': False,
'saturationVal': None, 'satNPix': 5, 'nanReplaceValue': 0.0,
'DATE-OBS': None,
'driftFile': None
}
for oneKey in defaultParams.keys():
if oneKey not in self.param:
self.param[oneKey] = defaultParams[oneKey]
xCoors, yCoors = [], []
positions = self.param['refStarPos']
self.nsrc = len(positions)
## Set up file names for output
self.check_file_structure()
self.dataFileDescrip = self.param['srcNameShort'] + '_'+ self.param['nightName']
self.photFile = os.path.join(self.baseDir,'tser_data','phot','phot_'+self.dataFileDescrip+'.fits')
self.centroidFile = os.path.join(self.baseDir,'centroids','cen_'+self.dataFileDescrip+'.fits')
self.refCorPhotFile = os.path.join(self.baseDir,'tser_data','refcor_phot','refcor_'+self.dataFileDescrip+'.fits')
# Get the file list
self.fileL = self.get_fileList()
self.nImg = len(self.fileL)
self.srcNames = np.array(np.arange(self.nsrc),dtype=str)
self.srcNames[0] = 'src'
self.set_up_apertures(positions)
self.check_parameters()
self.get_drift_dat()
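# A minimal parameter-file sketch for the defaults above (keys come from
# defaultParams; the values shown are illustrative, not required):
#
#     srcName: WASP 62
#     srcNameShort: wasp62
#     nightName: UT2020-01-20
#     procFiles: '*.fits'
#     refStarPos: [[50, 50]]
#     apRadius: 9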
def get_parameters(self,paramFile,directParam=None):
if directParam is None:
self.paramFile = paramFile
self.param = read_yaml(paramFile)
else:
self.paramFile = 'direct dictionary'
self.param = directParam
def check_file_structure(self):
"""
Check the file structure for plotting/saving data
"""
baseDir = get_baseDir()
structure_file = resource_filename('tshirt','directory_info/directory_list.yaml')
dirList = read_yaml(structure_file)
for oneFile in dirList:
fullPath = os.path.join(baseDir,oneFile)
ensure_directories_are_in_place(fullPath)
self.baseDir = baseDir
def get_fileList(self):
if self.param['readFromTshirtExamples'] == True:
## Find the files from the package data examples
## This is only when running example pipeline runs or tests
search_path = os.path.join(self.baseDir,'example_tshirt_data',self.param['procFiles'])
if len(glob.glob(search_path)) == 0:
print("Did not find example tshirt data. Now attempting to download...")
get_tshirt_example_data()
else:
search_path = self.param['procFiles']
origList = np.sort(glob.glob(search_path))
if self.param['excludeList'] is not None:
fileList = []
for oneFile in origList:
if os.path.basename(oneFile) not in self.param['excludeList']:
fileList.append(oneFile)
else:
fileList = origList
if len(fileList) == 0:
print("Note: File Search comes up empty")
if os.path.exists(self.photFile):
print("Note: Reading file list from previous phot file instead.")
t1 = Table.read(self.photFile,hdu='FILENAMES')
fileList = np.array(t1['File Path'])
return fileList
def check_parameters(self):
assert type(self.param['backOffset']) == list,"Background offset is not a list"
assert len(self.param['backOffset']) == 2,'Background offset must be a 2 element list'
def set_up_apertures(self,positions):
if self.param['srcGeometry'] == 'Circular':
self.srcApertures = CircularAperture(positions,r=self.param['apRadius'])
elif self.param['srcGeometry'] == 'Square':
self.srcApertures = RectangularAperture(positions,w=self.param['apRadius'],
h=self.param['apRadius'],theta=0)
elif self.param['srcGeometry'] == 'Rectangular':
self.srcApertures = RectangularAperture(positions,w=self.param['apWidth'],
h=self.param['apHeight'],theta=0)
else:
raise ValueError('Unrecognized aperture')
self.xCoors = self.srcApertures.positions[:,0]
self.yCoors = self.srcApertures.positions[:,1]
if self.param['bkgSub'] == True:
bkgPositions = np.array(deepcopy(positions))
bkgPositions[:,0] = bkgPositions[:,0] + self.param['backOffset'][0]
bkgPositions[:,1] = bkgPositions[:,1] + self.param['backOffset'][1]
if self.param['bkgGeometry'] == 'CircularAnnulus':
self.bkgApertures = CircularAnnulus(bkgPositions,r_in=self.param['backStart'],
r_out=self.param['backEnd'])
elif self.param['bkgGeometry'] == 'Rectangular':
self.bkgApertures = RectangularAperture(bkgPositions,w=self.param['backWidth'],
h=self.param['backHeight'],theta=0)
else:
raise ValueError('Unrecognized background geometry')
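# Sketch of what the defaults above produce (assuming the default
# 'Circular' source and 'CircularAnnulus' background geometries):
#
#     src = CircularAperture([[50, 50]], r=9)               # apRadius
#     bkg = CircularAnnulus([[50, 50]], r_in=9, r_out=12)   # backStart/backEnd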
def get_default_index(self):
"""
Get the default index from the file list
"""
return self.nImg // 2
def get_default_im(self,img=None,head=None):
""" Get the default image for postage stamps or star identification maps"""
## Get the data
if img is None:
img, head = self.getImg(self.fileL[self.get_default_index()])
return img, head
def get_default_cen(self,custPos=None,ind=0):
"""
Get the default centroids for postage stamps or star identification maps
Parameters
----------
custPos: numpy array
Array of custom positions for the apertures. Otherwise it uses the guess position
ind: int
Image index. This is used to guess the position if a drift file is given
"""
if custPos is None:
initialPos = deepcopy(self.srcApertures.positions)
showApPos = np.zeros_like(initialPos)
showApPos[:,0] = initialPos[:,0] + float(self.drift_dat['dx'][ind])
showApPos[:,1] = initialPos[:,1] + float(self.drift_dat['dy'][ind])
else:
showApPos = custPos
return showApPos
def get_drift_dat(self):
drift_dat_0 = Table()
drift_dat_0['Index'] = np.arange(self.nImg)
#drift_dat_0['File'] = self.fileL
drift_dat_0['dx'] = np.zeros(self.nImg)
drift_dat_0['dy'] = np.zeros(self.nImg)
if self.param['driftFile'] == None:
self.drift_dat = drift_dat_0
drift_file_found = False
else:
if self.param['readFromTshirtExamples'] == True:
## Find the files from the package data examples
## This is only when running example pipeline runs or tests
drift_file_path = os.path.join(self.baseDir,'example_tshirt_data',self.param['driftFile'])
else:
drift_file_path = self.param['driftFile']
if os.path.exists(drift_file_path) == False:
drift_file_found = False
warnings.warn("No Drift file found at {}".format(drift_file_path))
else:
drift_file_found = True
self.drift_dat = ascii.read(drift_file_path)
return drift_file_found
def make_drift_file(self,srcInd=0,refIndex=0):
"""
Use the centroids in photometry to generate a drift file of X/Y offsets
Parameters
----------
srcInd: int
The source index used for drifts
refIndex: int
Which file index corresponds to 0.0 drift
"""
HDUList = fits.open(self.photFile)
cenData = HDUList['CENTROIDS'].data
photHead = HDUList['PHOTOMETRY'].header
nImg = photHead['NIMG']
drift_dat = Table()
drift_dat['Index'] = np.arange(nImg)
x = cenData[:,srcInd,0]
drift_dat['dx'] = x - x[refIndex]
y = cenData[:,srcInd,1]
drift_dat['dy'] = y - y[refIndex]
drift_dat['File'] = HDUList['FILENAMES'].data['File Path']
outPath = os.path.join(self.baseDir,'centroids','drift_'+self.dataFileDescrip+'.ecsv')
drift_dat.meta['Zero Index'] = refIndex
drift_dat.meta['Source Used'] = srcInd
drift_dat.meta['Zero File'] = str(drift_dat['File'][refIndex])
print("Saving Drift file to {}".format(outPath))
drift_dat.write(outPath,overwrite=True,format='ascii.ecsv')
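    # Hedged usage sketch (assumes `phot` is an instance with a completed
    # photometry run): phot.make_drift_file() writes the ECSV above; pointing
    # phot.param['driftFile'] at that path then makes get_drift_dat() load it
    # and return drift_file_found == True.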
def showStarChoices(self,img=None,head=None,custPos=None,showAps=False,
srcLabel=None,figSize=None,showPlot=False,
apColor='black',backColor='black',
vmin=None,vmax=None,index=None,
labelColor='white',
xLim=None,yLim=None,
txtOffset=20):
"""
Show the star choices for photometry
Parameters
------------------
img : numpy 2D array, optional
An image to plot
head : astropy FITS header, optional
header for image
custPos : numpy 2D array or list of tuple coordinates, optional
Custom positions
showAps : bool, optional
Show apertures rather than circle stars
srcLabel : str or None, optional
What should the source label be? The default is "src"
        figSize : tuple or None, optional
            Specify the size of the plot.
            This is useful for looking at high/lower resolution
showPlot : bool
Show the plot? If True, it will show, otherwise it is saved as a file
apColor: str
The color for the source apertures
backColor: str
The color for the background apertures
vmin: float or None
A value for the :code:`matplotlib.pyplot.plot.imshow` vmin parameter
vmax: float or None
A value for the :code:`matplotlib.pyplot.plot.imshow` vmax parameter
index: int or None
The index of the file name. If None, it uses the default
labelColor: str
Color for the text label for sources
xLim: None or two element list
Specify the minimum and maximum X for the plot. For example xLim=[40,60]
yLim: None or two element list
Specify the minimum and maximum Y for the plot. For example yLim=[40,60]
txtOffset: float
The X and Y offset to place the text label for a source
"""
fig, ax = plt.subplots(figsize=figSize)
if index is None:
index = self.get_default_index()
if img is None:
img, head = self.getImg(self.fileL[index])
else:
img_other, head = self.get_default_im(img=img,head=None)
if vmin is None:
useVmin = np.nanpercentile(img,1)
else:
useVmin = vmin
if vmax is None:
useVmax = np.nanpercentile(img,99)
else:
useVmax = vmax
imData = ax.imshow(img,cmap='viridis',vmin=useVmin,vmax=useVmax,interpolation='nearest')
ax.invert_yaxis()
rad = 50 ## the radius for the matplotlib scatter to show source centers
showApPos = self.get_default_cen(custPos=custPos,ind=index)
if showAps == True:
apsShow = deepcopy(self.srcApertures)
apsShow.positions = showApPos
self.adjust_apertures(index)
if photutils.__version__ >= "0.7":
apsShow.plot(axes=ax,color=apColor)
else:
apsShow.plot(ax=ax,color=apColor)
if self.param['bkgSub'] == True:
backApsShow = deepcopy(self.bkgApertures)
backApsShow.positions = showApPos
backApsShow.positions[:,0] = backApsShow.positions[:,0] + self.param['backOffset'][0]
backApsShow.positions[:,1] = backApsShow.positions[:,1] + self.param['backOffset'][1]
if photutils.__version__ >= "0.7":
backApsShow.plot(axes=ax,color=backColor)
else:
backApsShow.plot(ax=ax,color=backColor)
outName = 'ap_labels_{}.pdf'.format(self.dataFileDescrip)
else:
ax.scatter(showApPos[:,0],showApPos[:,1], s=rad, facecolors='none', edgecolors='r')
outName = 'st_labels_{}.pdf'.format(self.dataFileDescrip)
for ind, onePos in enumerate(showApPos):
#circ = plt.Circle((onePos[0], onePos[1]), rad, color='r')
#ax.add_patch(circ)
if ind == 0:
if srcLabel is None:
name='src'
else:
name=srcLabel
else:
name=str(ind)
ax.text(onePos[0]+txtOffset,onePos[1]+txtOffset,name,color=labelColor)
ax.set_xlabel('X (px)')
ax.set_ylabel('Y (px)')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(imData,label='Counts',cax=cax)
ax.set_xlim(xLim)
ax.set_ylim(yLim)
if showPlot == True:
fig.show()
else:
outF = os.path.join(self.baseDir,'plots','photometry','star_labels',outName)
fig.savefig(outF,
bbox_inches='tight')
plt.close(fig)
def showStamps(self,img=None,head=None,custPos=None,custFWHM=None,
vmin=None,vmax=None,showPlot=False,boxsize=None,index=None):
"""
Shows the fixed apertures on the image with postage stamps surrounding sources
Parameters
-----------
index: int
Index of the file list. This is needed if scaling apertures
"""
## Calculate approximately square numbers of X & Y positions in the grid
numGridY = int(np.floor(np.sqrt(self.nsrc)))
numGridX = int(np.ceil(float(self.nsrc) / float(numGridY)))
fig, axArr = plt.subplots(numGridY, numGridX)
img, head = self.get_default_im(img=img,head=head)
if boxsize == None:
boxsize = self.param['boxFindSize']
showApPos = self.get_default_cen(custPos=custPos)
if index is None:
index = self.get_default_index()
self.adjust_apertures(index)
for ind, onePos in enumerate(showApPos):
if self.nsrc == 1:
ax = axArr
else:
ax = axArr.ravel()[ind]
            yStamp_proposed = np.array(onePos[1] + np.array([-1,1]) * boxsize,dtype=int)
            xStamp_proposed = np.array(onePos[0] + np.array([-1,1]) * boxsize,dtype=int)
"""
Module providing unit-testing for the `~halotools.mock_observables.alignments.w_gplus` function.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import warnings
import pytest
from astropy.utils.misc import NumpyRNGContext
from ..gi_plus_projected import gi_plus_projected
from halotools.custom_exceptions import HalotoolsError
slow = pytest.mark.slow
__all__ = ('test_w_gplus_returned_shape', 'test_w_gplus_threading', 'test_orientation_usage', 'test_round_result')
fixed_seed = 43
def test_w_gplus_returned_shape():
"""
make sure the result that is returned has the correct shape
"""
ND = 100
NR = 100
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((ND, 3))
randoms = np.random.random((NR, 3))
period = np.array([1.0, 1.0, 1.0])
rp_bins = np.linspace(0.001, 0.3, 5)
pi_max = 0.2
random_orientation = np.random.random((len(sample1), 2))
random_ellipticities = np.random.random((len(sample1)))
# analytic randoms
result_1 = gi_plus_projected(sample1, random_orientation, random_ellipticities, sample1,
rp_bins, pi_max, period=period, num_threads=1)
assert np.shape(result_1) == (len(rp_bins)-1, )
result_2 = gi_plus_projected(sample1, random_orientation, random_ellipticities, sample1,
rp_bins, pi_max, period=period, num_threads=3)
assert np.shape(result_2) == (len(rp_bins)-1, )
# real randoms
result_1 = gi_plus_projected(sample1, random_orientation, random_ellipticities, sample1,
rp_bins, pi_max, randoms1=randoms, randoms2=randoms, period=period, num_threads=1)
assert np.shape(result_1) == (len(rp_bins)-1, )
result_2 = gi_plus_projected(sample1, random_orientation, random_ellipticities, sample1,
rp_bins, pi_max, randoms1=randoms, randoms2=randoms, period=period, num_threads=3)
assert np.shape(result_2) == (len(rp_bins)-1, )
def test_w_gplus_threading():
"""
test to make sure the results are consistent when num_threads=1 or >1
"""
ND = 100
NR = 100
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((ND, 3))
randoms = np.random.random((NR, 3))
period = np.array([1.0, 1.0, 1.0])
    rp_bins = np.linspace(0.001, 0.3, 5)
# ==============================================================================
# Copyright 2021 SciANN -- <NAME>.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# A guide for generating collocation points for PINN solvers.
#
# Includes:
# - DataGeneratorX:
# Generate 1D collocation grid.
# - DataGeneratorXY:
# Generate 2D collocation grid for a rectangular domain.
# - DataGeneratorXT:
# Generate 1D time-dependent collocation grid.
# - DataGeneratorXYT:
# Generate 2D time-dependent collocation grid for a rectangular domain.
# ==============================================================================
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
cycol = cycle('bgrcmk')
class DataGeneratorX:
""" Generates 1D collocation grid for training PINNs
# Arguments:
X: [X0, X1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorX([0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# left bc points
x_bc_left = np.full(int(num_sample/2), self.Xdomain[0])
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-int(num_sample/2), self.Xdomain[1])
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=1000):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
return xs
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.ylim(-1,1)
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.ylim(-1,1)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.title('Training Data')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
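# A minimal, hypothetical demo (not part of the original guide): it shows how
# each entry of target_data pairs row ids of the stacked inputs with 'zeros'.
def _demo_data_generator_x():
    dg = DataGeneratorX(X=[0., 1.], targets=['domain', 'bc-left', 'bc-right'], num_sample=100)
    inputs, targets = dg.get_data()
    x = inputs[0]  # stacked collocation points, shape (N, 1)
    for name, (ids, val) in zip(dg.targets, targets):
        # `ids` selects the rows of x that belong to this target
        print(name, ids.size, float(x[ids].min()), float(x[ids].max()), val)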
class DataGeneratorXY:
""" Generates 2D collocation grid for a rectangular domain
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorXY([0., 1.], [0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
        # top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Ny=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
        x_grid, y_grid = np.meshgrid(xs, ys)
        return [x_grid, y_grid]
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('y')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXT:
""" Generates 1D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
T: [T0, T1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'ic', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXT([0., 1.], [0., 1.], ["domain", "ic", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
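            # Sampling is uniform in u = log(1 + t) and inverted via t = e^u - 1,
            # which concentrates points near t = T0; e.g. for T = [0, 10], half
            # of the samples land below t = sqrt(11) - 1 ~= 2.32.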
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/2)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-num_sample_per_edge, self.Xdomain[1])
t_bc_right = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Nt=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
t_data = self.input_data[1][ids,:]
plt.scatter(x_data, t_data)
plt.xlabel('x')
plt.ylabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
t_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, t_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXYT:
""" Generates 2D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
T: [T0, T1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXYT([0., 1.], [0., 1.], [0., 1.],
["domain", "ic", "bc-left", "bc-right", "bc-bot", "bc-top"],
10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
        ids_dom = np.arange(x_dom.shape[0])
# packages
import numpy as np
# project
import forecast.helpers as hlp
import config.parameters as prm
class Sensor():
"""
One Sensor object for each sensor in project.
It keeps track of the algorithm state between events.
When new event_data json is received, iterate algorithm one sample.
"""
def __init__(self, device, device_id, args):
# give to self
self.device = device
self.device_id = device_id
self.args = args
# contains level, trend and season for modelled data
self.model = {
'unixtime': [], # shared unixtime timeaxis
'temperature': [], # temperature values
'level': [], # modeled level
'trend': [], # modeled trend
'season': [], # modeled season
}
# contains all previous forecasts in history
self.forecast = {
'unixtime': [], # shared unixtime timeaxis
'temperature': [], # temperature values
'residual': [], # forecast residual
}
# variables
self.n_samples = 0 # number of event samples received
self.initialised = False
self.residual_std = 0
def new_event_data(self, event_data):
"""
Receive new event from Director and iterate algorithm.
Parameters
----------
event_data : dict
Event json containing temperature data.
"""
# convert timestamp to unixtime
_, unixtime = hlp.convert_event_data_timestamp(event_data['data']['temperature']['updateTime'])
# append new temperature value
self.model['unixtime'].append(unixtime)
self.model['temperature'].append(event_data['data']['temperature']['value'])
self.n_samples += 1
# initialise holt winters
if self.n_samples < prm.season_length * prm.n_seasons_init:
return
elif not self.initialised:
self.__initialise_holt_winters()
else:
# iterate Holt-Winters
self.__iterate_holt_winters()
# forecast
self.__model_forecast()
def __initialise_holt_winters(self):
"""
Calculate initial level, trend and seasonal component.
Based on: https://robjhyndman.com/hyndsight/hw-initialization/
"""
# convert to numpy array for indexing
temperature = np.array(self.model['temperature'])
# fit a 3xseason moving average to temperature
ma = np.zeros(self.n_samples)
for i in range(self.n_samples):
# define ma interval
xl = max(0, i - int(1.5*prm.season_length))
xr = min(self.n_samples, i + int(1.5*prm.season_length+1))
# mean
ma[i] = np.mean(temperature[xl:xr])
# subtract moving average
df = temperature - ma
# generate average seasonal component
avs = []
for i in range(prm.season_length):
avs.append(np.mean([df[i+j*prm.season_length] for j in range(prm.n_seasons_init)]))
# expand average season into own seasonal component
for i in range(self.n_samples):
self.model['season'].append(avs[i%len(avs)])
# subtract initial season from original temperature to get adjusted temperature
adjusted = temperature - np.array(self.model['season'])
# fit a linear trend to adjusted temperature
xax = np.arange(self.n_samples)
a, b = hlp.algebraic_linreg(xax, adjusted)
linreg = a + xax*b
# set initial level, slope, and brutlag deviation
for i in range(self.n_samples):
self.model['level'].append(linreg[i])
self.model['trend'].append(b)
# flip flag
self.initialised = True
def __iterate_holt_winters(self):
"""
Update level, trend and seasonal component of Holt-Winters model.
"""
# calculate level (l), trend (b), and season (s) components
l = prm.alpha*(self.model['temperature'][-1] - self.model['season'][-prm.season_length]) + (1 - prm.alpha)*(self.model['level'][-1] + self.model['trend'][-1])
b = prm.beta*(l - self.model['level'][-1]) + (1 - prm.beta)*self.model['trend'][-1]
s = prm.gamma*(self.model['temperature'][-1] - self.model['level'][-1] - self.model['trend'][-1]) + (1 - prm.gamma)*self.model['season'][-prm.season_length]
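        # These are the standard additive Holt-Winters updates with season length m:
        #   l_t = alpha*(y_t - s_{t-m}) + (1 - alpha)*(l_{t-1} + b_{t-1})
        #   b_t = beta*(l_t - l_{t-1}) + (1 - beta)*b_{t-1}
        #   s_t = gamma*(y_t - l_{t-1} - b_{t-1}) + (1 - gamma)*s_{t-m}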
# append components
self.model['level'].append(l)
self.model['trend'].append(b)
self.model['season'].append(s)
def __model_forecast(self):
"""
Holt-Winters n-step ahead forecasting and prediction interval calculation.
        Forecast and prediction intervals based on: https://otexts.com/fpp2/prediction-intervals.html
"""
# use average step length the last 24h
tax = np.array(self.model['unixtime'])[np.array(self.model['unixtime']) > int(self.model['unixtime'][-1])-60*60*24]
        ux_step = np.mean(tax[1:] - tax[:-1])
"""
This file contains all the functions for creating an image plus projected lines of a predefined height from radar data.
The height can either be predefined or calculated from the radar's elevation field of view.
This file was completely reworked on 2019-01-23 for best functionality. Some function arguments changed, so please verify your references to this file.
"""
# Standard libraries
import os
import os.path as osp
import sys
import math
import time
# 3rd party libraries
import cv2
import json
import numpy as np
from pyquaternion import Quaternion
from PIL import Image
# Local modules
# Allow relative imports when being executed as script.
if __name__ == "__main__" and not __package__:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import crfnet.raw_data_fusion # noqa: F401
__package__ = "crfnet.raw_data_fusion"
from nuscenes.utils.data_classes import PointCloud
from ...utils import radar
# from nuscenes.utils.geometry_utils import view_points
def _resize_image(image_data, target_shape):
"""
    Performs resizing of the image and calculates a matrix to adapt the intrinsic camera matrix
:param image_data: [np.array] with shape (height x width x 3)
:param target_shape: [tuple] with (width, height)
:return resized image: [np.array] with shape (height x width x 3)
:return resize matrix: [numpy array (3 x 3)]
"""
# print('resized', type(image_data))
stupid_confusing_cv2_size_because_width_and_height_are_in_wrong_order = (target_shape[1], target_shape[0])
resized_image = cv2.resize(image_data, stupid_confusing_cv2_size_because_width_and_height_are_in_wrong_order)
resize_matrix = np.eye(3, dtype=resized_image.dtype)
resize_matrix[1, 1] = target_shape[0]/image_data.shape[0]
resize_matrix[0, 0] = target_shape[1]/image_data.shape[1]
return resized_image, resize_matrix
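# A sketch of the assumed usage of the returned matrix (hypothetical names):
# it rescales the camera intrinsics so projections stay consistent on the
# resized image.
#   resized_img, resize_matrix = _resize_image(image, (450, 450))
#   camera_matrix_resized = resize_matrix @ camera_matrix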
def _radar_transformation(radar_data, height=None):
"""
    Assigns heights to the given radar points (input z = 0): the returned points keep the lower height and a second set of endpoints gets the upper height.
    The heights are either taken from the `height` tuple or derived from the radar's elevation field of view (the distance is expected in the last row of `radar_data`).
    Parameters:
        :param radar_data: [numpy array] with radar parameters (e.g. velocity) in rows and radar points for one timestep in columns
            Semantics: x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0 distance
        :param height: [tuple] (min height, max height) that defines the (unknown) height of the radar points
    Returns:
        :returns radar_data: [numpy array (m x no of points)] radar points with z set to the lower height
        :returns radar_xyz_endpoint: [numpy array (3 x no of points)] radar points with z set to the upper height
"""
# Field of view (global)
ELEVATION_FOV_SR = 20
ELEVATION_FOV_FR = 14
# initialization
num_points = radar_data.shape[1]
# Radar points for the endpoint
radar_xyz_endpoint = radar_data[0:3,:].copy()
# variant 1: constant height substracted by RADAR_HEIGHT
RADAR_HEIGHT = 0.5
if height:
radar_data[2, :] = np.ones((num_points,)) * (height[0] - RADAR_HEIGHT) # lower points
radar_xyz_endpoint[2, :] = np.ones((num_points,)) * (height[1] - RADAR_HEIGHT) # upper points
# variant 2: field of view
else:
dist = radar_data[-1,:]
count = 0
for d in dist:
# short range mode
if d <= 70:
radar_xyz_endpoint[2, count] = -d * np.tan(ELEVATION_FOV_SR/2)
# long range mode
else:
radar_xyz_endpoint[2, count] = -d * np.tan(ELEVATION_FOV_FR/2)
count += 1
return radar_data, radar_xyz_endpoint
def _create_line(P1, P2, img):
"""
    Produces an array of the coordinates of each pixel on a line between two points
:param P1: [numpy array] that consists of the coordinate of the first point (x,y)
:param P2: [numpy array] that consists of the coordinate of the second point (x,y)
:param img: [numpy array] the image being processed
    :return itbuffer: [numpy array] coordinates of each pixel on the line (shape: [numPixels, 2], row = [x, y])
"""
# define local variables for readability
imageH = img.shape[0]
imageW = img.shape[1]
P1X = P1[0]
P1Y = P1[1]
P2X = P2[0]
P2Y = P2[1]
# difference and absolute difference between points
# used to calculate slope and relative location between points
dX = P2X - P1X
dY = P2Y - P1Y
dXa = np.abs(dX)
dYa = np.abs(dY)
# predefine numpy array for output based on distance between points
itbuffer = np.empty(
shape=(np.maximum(int(dYa), int(dXa)), 2), dtype=np.float32)
itbuffer.fill(np.nan)
# Obtain coordinates along the line using a form of Bresenham's algorithm
negY = P1Y > P2Y
negX = P1X > P2X
if P1X == P2X: # vertical line segment
itbuffer[:, 0] = P1X
if negY:
itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
else:
itbuffer[:, 1] = np.arange(P1Y+1, P1Y+dYa+1)
elif P1Y == P2Y: # horizontal line segment
itbuffer[:, 1] = P1Y
if negX:
itbuffer[:, 0] = np.arange(P1X-1, P1X-dXa-1, -1)
else:
itbuffer[:, 0] = np.arange(P1X+1, P1X+dXa+1)
else: # diagonal line segment
steepSlope = dYa > dXa
if steepSlope:
slope = dX.astype(np.float32)/dY.astype(np.float32)
if negY:
itbuffer[:, 1] = np.arange(P1Y-1, P1Y-dYa-1, -1)
else:
itbuffer[:, 1] = np.arange(P1Y+1, P1Y+dYa+1)
            itbuffer[:, 0] = (slope*(itbuffer[:, 1]-P1Y)).astype(int) + P1X
else:
slope = dY.astype(np.float32)/dX.astype(np.float32)
if negX:
itbuffer[:, 0] = np.arange(P1X-1, P1X-dXa-1, -1)
else:
itbuffer[:, 0] = np.arange(P1X+1, P1X+dXa+1)
            itbuffer[:, 1] = (slope*(itbuffer[:, 0]-P1X)).astype(int) + P1Y
# Remove points outside of image
colX = itbuffer[:, 0].astype(int)
colY = itbuffer[:, 1].astype(int)
itbuffer = itbuffer[(colX >= 0) & (colY >= 0) &
(colX < imageW) & (colY < imageH)]
return itbuffer
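# Usage sketch with hypothetical values: pixel coordinates on the segment from
# (10, 10) to (20, 25), clipped to the image bounds.
#   pts = _create_line(np.array([10, 10]), np.array([20, 25]), img)  # rows of [x, y]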
def _create_vertical_line(P1, P2, img):
"""
    Produces an array of the coordinates of each pixel on a vertical line between two points
:param P1: [numpy array] that consists of the coordinate of the first point (x,y)
:param P2: [numpy array] that consists of the coordinate of the second point (x,y)
:param img: [numpy array] the image being processed
    :return itbuffer: [numpy array] coordinates of each pixel on the line (shape: [numPixels, 2], row = [x, y]),
        computed using cross projection
"""
# define local variables for readability
imageH = img.shape[0]
imageW = img.shape[1]
# difference and absolute difference between points
# used to calculate slope and relative location between points
P1_y = int(P1[1])
P2_y = int(P2[1])
    # cross projection: tie the horizontal extent to the vertical one, dX = dY/2
dY = P2_y - P1_y
dX = math.floor(dY/2)
if dY == 0:
dY = 1
if dX == 0:
dX = 1
    dXa = np.abs(dX)
# Strain calculation functions
import numpy as np
from numpy import polyder, polyval, polyfit
import crosspy
def strain_calc(d, mapnos = 0, strain_method = 'l2'):
# This function calculates the strain between two images
# Images
# nodes_x = roi centres x
# nodes_y = roi centres y
# disp_x = dx_maps
# disp_y = dy_maps
# get displacements and positions
x = d.x_pos
y = d.y_pos
dx = d.dx_maps[:,:,mapnos]
dy = d.dy_maps[:,:,mapnos]
# determine strain method
if strain_method == 'l2':
e, e_eff, r, f = strain_l2(dx, dy, x, y)
else:
raise Exception('Invalid strain method!')
return e, e_eff, r, f
def strain_l2(dx, dy, x, y):
# Function to determine strain via polynomial fitting and l2 min
# The purpose of this strain algo is to smooth the strain assuming that strain is a continous gradient field
# Preallocate arrays
rows = np.size(dx,0)
cols = np.size(dx,1)
e_temp = np.zeros(shape=(rows,cols,3,3))
eeff_temp = np.zeros(shape=(rows,cols,1))
rotation_temp = np.zeros(shape=(rows,cols,3,3))
e11_temp = np.zeros(shape=(rows,cols))
e22_temp = np.zeros(shape=(rows,cols))
e12_temp = np.zeros(shape=(rows,cols))
# First obtain strain values for corners and edges of the map
e11_temp, e22_temp, e12_temp = strain_l2_corners(dx, dy, x, y, e11_temp, e22_temp, e12_temp)
e11_temp, e22_temp, e12_temp = strain_l2_edges(dx, dy, x, y, e11_temp, e22_temp, e12_temp)
# Obtain bulk values in loop below - corners and edges are already determined
for i in range(0,rows-1):
for j in range(0,cols-1):
dhor_3pt_x = np.array([dx[i,j-1], dx[i,j], dx[i,j+1]])
dhor_3pt_y = np.array([dy[i,j-1], dy[i,j], dy[i,j+1]])
dver_3pt_x = np.array([dx[i-1,j], dx[i,j], dx[i+1,j]])
dver_3pt_y = np.array([dy[i-1,j], dy[i,j], dy[i+1,j]])
pos_x3 = np.array([x[i,j-1], x[i,j], x[i,j+1]])
pos_y3 = np.array([y[i-1,j], y[i,j], y[i+1,j]])
# Determine second order poly fit and derivative
coef_x = polyder(polyfit(pos_x3, dhor_3pt_x,2))
coef_y = polyder(polyfit(pos_y3, dver_3pt_y,2))
coef_xy = polyder(polyfit(pos_x3, dhor_3pt_y,2))
            coef_yx = polyder(polyfit(pos_y3, dver_3pt_x,2))
            # Obtain values of polynomial fit at the centre of object pixel
            du_dx = polyval(coef_x, x[i,j])   # du/dx from the dx map
            dv_dy = polyval(coef_y, y[i,j])   # dv/dy from the dy map
            du_dy = polyval(coef_yx, y[i,j])  # du/dy from the dx map
            dv_dx = polyval(coef_xy, x[i,j])  # dv/dx from the dy map
# Create the deformation gradient F from displacements u and v
F = np.array([[du_dx, du_dy, 0], [dv_dx, dv_dy, 0], [0, 0, -(du_dx+dv_dy)]])+np.eye(3)
Ft = F.transpose()
# C_test = np.dot(F,Ft)
            C = np.matmul(F.transpose(), F) # right Cauchy-Green deformation tensor
V, Q = np.linalg.eig(C) # eigenvalues V and vector Q
V = np.diag(V)
Ut = np.sqrt(V)
            U = np.matmul(Q, np.matmul(Ut, Q.transpose()))  # sqrt(C); eig returns eigenvectors as columns of Q
U_1 = np.linalg.inv(U)
R = np.dot(F, U_1) # rotations
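            # Polar decomposition: C = F^T F = Q V Q^T, so U = Q sqrt(V) Q^T is
            # the right stretch tensor and R = F U^{-1} isolates the rigid rotation.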
# Determine green strain tensor and rotation tensor from F by symm and anti symm parts
            e_temp[i,j,:,:] = 0.5*(np.matmul(F.transpose(), F) - np.eye(3))
rotation_temp[i,j,:,:] = R
# Determine the effective strain
            eeff_temp[i,j,:] = np.sqrt((2/3)*np.tensordot(e_temp[i,j], e_temp[i,j]))
# Form outputs
strain = e_temp
strain_effective = eeff_temp
    rotation = rotation_temp
return strain, strain_effective, rotation, F
def strain_l2_corners(dx, dy, x, y, e11, e22, e12):
# Use first order polynomial fitting for the 4 corners of the map
rows = np.size(dx,0)-1
cols = np.size(dx,1)-1
## first corner - top left
dx_x1 = [dx[0,0], dx[0,1]]
    dx_y1 = [dy[0,0], dy[0,1]]
    dy_x1 = [dx[0,0], dx[1,0]]
dy_y1 = [dy[0,0], dy[1,0]]
pos_x1 = [x[0,0], x[0,1]]
pos_y1 = [y[0,0], y[1,0]]
# determine first order of poly fit
coef_x = polyfit(pos_x1, dx_x1,1)
coef_y = polyfit(pos_y1, dy_y1,1)
coef_xy = polyfit(pos_x1, dx_y1,1)
coef_yx = polyfit(pos_y1, dy_x1,1)
# equal to strain
e11[0,0] = coef_x[0]
e22[0,0] = coef_y[0]
e12[0,0] = 0.5*(coef_xy[0]+coef_yx[0])
## second corner - top right
dx_x2 = [dx[0,cols-1], dx[0,cols]]
    dx_y2 = [dy[0,cols-1], dy[0,cols]]
    dy_x2 = [dx[0,cols], dx[1,cols]]
    dy_y2 = [dy[0,cols], dy[1,cols]]
    pos_x2 = [x[0,cols-1], x[0,cols]]
    pos_y2 = [y[0,cols], y[1,cols]]
    # determine first order of poly fit
    coef_x = polyfit(pos_x2, dx_x2,1)
    coef_y = polyfit(pos_y2, dy_y2,1)
coef_xy = polyfit(pos_x2, dx_y2,1)
coef_yx = polyfit(pos_y2, dy_x2,1)
# equal to strain
e11[0,cols] = coef_x[0]
e22[0,cols] = coef_y[0]
e12[0,cols] = 0.5*(coef_xy[0]+coef_yx[0])
## third corner - bottom left
dx_x3 = [dx[rows,0], dx[rows,1]]
    dx_y3 = [dy[rows,0], dy[rows,1]]
    dy_x3 = [dx[rows-1,0], dx[rows,0]]
dy_y3 = [dy[rows-1,0], dy[rows,0]]
pos_x3 = [x[rows,0], x[rows,1]]
pos_y3 = [y[rows-1,0], y[rows,0]]
# determine first order of poly fit
coef_x = polyfit(pos_x3, dx_x3,1)
coef_y = polyfit(pos_y3, dy_y3,1)
coef_xy = polyfit(pos_x3, dx_y3,1)
coef_yx = polyfit(pos_y3, dy_x3,1)
# equal to strain
e11[rows,0] = coef_x[0]
e22[rows,0] = coef_y[0]
e12[rows,0] = 0.5*(coef_xy[0]+coef_yx[0])
## fourth corner - bottom right
dx_x4 = [dx[rows,cols-1], dx[rows,cols]]
    dx_y4 = [dy[rows,cols-1], dy[rows,cols]]
    dy_x4 = [dx[rows-1,cols], dx[rows,cols]]
dy_y4 = [dy[rows-1,cols], dy[rows,cols]]
pos_x4 = [x[rows,cols-1], x[rows,cols]]
pos_y4 = [y[rows-1,cols], y[rows,cols]]
# determine first order of poly fit
coef_x = polyfit(pos_x4, dx_x4,1)
coef_y = polyfit(pos_y4, dy_y4,1)
coef_xy = polyfit(pos_x4, dx_y4,1)
coef_yx = polyfit(pos_y4, dy_x4,1)
# equal to strain
e11[rows,cols] = coef_x[0]
e22[rows,cols] = coef_y[0]
e12[rows,cols] = 0.5*(coef_xy[0]+coef_yx[0])
return e11, e22, e12
def strain_l2_edges(dx, dy, x, y, e11, e22, e12):
# Use polynomial fit to find strain on edges of map
    rows = np.size(dx,0)
import os
import numpy as np
import cv2
from collections import defaultdict
import hashlib
import glob
import configparser
from sixd_toolkit.params import dataset_params
from sixd_toolkit.pysixd import inout
from auto_pose.ae.pysixd_stuff import view_sampler
from auto_pose.ae import utils as u
import glob
def get_gt_scene_crops(scene_id, eval_args, train_args, load_gt_masks=False):
dataset_name = eval_args.get('DATA','DATASET')
cam_type = eval_args.get('DATA','CAM_TYPE')
icp = eval_args.getboolean('EVALUATION','ICP')
delta = eval_args.get('METRIC', 'VSD_DELTA')
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
dataset_path = u.get_dataset_path(workspace_path)
H_AE = train_args.getint('Dataset','H')
W_AE = train_args.getint('Dataset','W')
cfg_string = str([scene_id] + eval_args.items('DATA') + eval_args.items('BBOXES') + [H_AE])
cfg_string = cfg_string.encode('utf-8')
current_config_hash = hashlib.md5(cfg_string).hexdigest()
current_file_name = os.path.join(dataset_path, current_config_hash + '.npz')
if os.path.exists(current_file_name):
data = np.load(current_file_name)
test_img_crops = data['test_img_crops'].item()
test_img_depth_crops = data['test_img_depth_crops'].item()
bb_scores = data['bb_scores'].item()
bb_vis = data['visib_gt'].item()
bbs = data['bbs'].item()
if not os.path.exists(current_file_name) or len(test_img_crops) == 0 or len(test_img_depth_crops) == 0:
test_imgs = load_scenes(scene_id, eval_args)
test_imgs_depth = load_scenes(scene_id, eval_args, depth=True) if icp else None
data_params = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
# only available for primesense, sixdtoolkit can generate
visib_gt = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(scene_id, delta))
gt = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))
gt_inst_masks = None
if load_gt_masks:
mask_paths = glob.glob(os.path.join(load_gt_masks, '{:02d}/masks/*.npy'.format(scene_id)))
gt_inst_masks = [np.load(mp) for mp in mask_paths]
test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis = generate_scene_crops(test_imgs, test_imgs_depth, gt, eval_args,
(H_AE, W_AE), visib_gt=visib_gt,inst_masks=gt_inst_masks)
np.savez(current_file_name, test_img_crops=test_img_crops, test_img_depth_crops=test_img_depth_crops, bbs = bbs, bb_scores=bb_scores, visib_gt=bb_vis)
current_cfg_file_name = os.path.join(dataset_path, current_config_hash + '.cfg')
with open(current_cfg_file_name, 'w') as f:
f.write(cfg_string)
print('created new ground truth crops!')
else:
print('loaded previously generated ground truth crops!')
print((len(test_img_crops), len(test_img_depth_crops)))
return (test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis)
def get_sixd_gt_train_crops(obj_id, hw_ae, pad_factor=1.2, dataset='tless', cam_type='primesense'):
data_params = dataset_params.get_dataset_params(dataset, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
eval_args = configparser.ConfigParser()
eval_args.add_section("DATA")
eval_args.add_section("BBOXES")
eval_args.add_section("EVALUATION")
eval_args.set('BBOXES', 'ESTIMATE_BBS', "False")
eval_args.set('EVALUATION','ICP', "False")
eval_args.set('BBOXES','PAD_FACTOR', str(pad_factor))
eval_args.set('BBOXES','ESTIMATE_MASKS', "False")
eval_args.set('DATA','OBJ_ID', str(obj_id))
gt = inout.load_gt(data_params['obj_gt_mpath'].format(obj_id))
imgs = []
for im_id in range(504):
imgs.append(cv2.imread(data_params['train_rgb_mpath'].format(obj_id, im_id)))
test_img_crops, _, _, _, _ = generate_scene_crops(np.array(imgs), None, gt, eval_args, hw_ae)
return test_img_crops
def generate_scene_crops(test_imgs, test_depth_imgs, gt, eval_args, hw_ae, visib_gt = None, inst_masks=None):
obj_id = eval_args.getint('DATA','OBJ_ID')
estimate_bbs = eval_args.getboolean('BBOXES', 'ESTIMATE_BBS')
pad_factor = eval_args.getfloat('BBOXES','PAD_FACTOR')
icp = eval_args.getboolean('EVALUATION','ICP')
estimate_masks = eval_args.getboolean('BBOXES','ESTIMATE_MASKS')
print(hw_ae)
H_AE, W_AE = hw_ae
test_img_crops, test_img_depth_crops, bb_scores, bb_vis, bbs = {}, {}, {}, {}, {}
H,W = test_imgs.shape[1:3]
for view,img in enumerate(test_imgs):
if icp:
depth = test_depth_imgs[view]
test_img_depth_crops[view] = {}
test_img_crops[view], bb_scores[view], bb_vis[view], bbs[view] = {}, {}, {}, {}
if len(gt[view]) > 0:
for bbox_idx,bbox in enumerate(gt[view]):
if bbox['obj_id'] == obj_id:
if 'score' in bbox:
if bbox['score']==-1:
continue
bb = np.array(bbox['obj_bb'])
obj_id = bbox['obj_id']
bb_score = bbox['score'] if estimate_bbs else 1.0
if estimate_bbs and visib_gt is not None:
vis_frac = visib_gt[view][bbox_idx]['visib_fract']
else:
vis_frac = None
x,y,w,h = bb
size = int(np.maximum(h,w) * pad_factor)
left = int(np.max([x+w/2-size/2, 0]))
right = int(np.min([x+w/2+size/2, W]))
top = int(np.max([y+h/2-size/2, 0]))
bottom = int(np.min([y+h/2+size/2, H]))
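                    # The crop is a square of side pad_factor * max(w, h), centred
                    # on the box centre and clipped to the image bounds.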
if inst_masks is None:
crop = img[top:bottom, left:right].copy()
if icp:
depth_crop = depth[top:bottom, left:right]
else:
if not estimate_masks:
mask = inst_masks[view]
img_copy = np.zeros_like(img)
img_copy[mask == (bbox_idx+1)] = img[mask == (bbox_idx+1)]
crop = img_copy[top:bottom, left:right].copy()
if icp:
depth_copy = np.zeros_like(depth)
depth_copy[mask == (bbox_idx+1)] = depth[mask == (bbox_idx+1)]
depth_crop = depth_copy[top:bottom, left:right]
else:
# chan = int(bbox['np_channel_id'])
chan = bbox_idx
mask = inst_masks[view][:,:,chan]
# kernel = np.ones((2,2), np.uint8)
# mask_eroded = cv2.dilate(mask.astype(np.uint8), kernel, iterations=1)
# cv2.imshow('mask_erod',mask_eroded.astype(np.float32))
# cv2.imshow('mask',mask.astype(np.float32))
# cv2.waitKey(0)
# mask = mask_eroded.astype(np.bool)
img_copy = np.zeros_like(img)
img_copy[mask] = img[mask]
crop = img_copy[top:bottom, left:right].copy()
if icp:
depth_copy = np.zeros_like(depth)
depth_copy[mask] = depth[mask]
depth_crop = depth_copy[top:bottom, left:right]
#print bb
## uebler hack: remove!
# xmin, ymin, xmax, ymax = bb
# x, y, w, h = xmin, ymin, xmax-xmin, ymax-ymin
# bb = np.array([x, y, w, h])
##
resized_crop = cv2.resize(crop, (H_AE,W_AE))
if icp:
test_img_depth_crops[view].setdefault(obj_id,[]).append(depth_crop)
test_img_crops[view].setdefault(obj_id,[]).append(resized_crop)
bb_scores[view].setdefault(obj_id,[]).append(bb_score)
bb_vis[view].setdefault(obj_id,[]).append(vis_frac)
bbs[view].setdefault(obj_id,[]).append(bb)
return (test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis)
def noof_scene_views(scene_id, eval_args):
dataset_name = eval_args.get('DATA','DATASET')
cam_type = eval_args.get('DATA','CAM_TYPE')
p = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
noof_imgs = len(os.listdir(os.path.dirname(p['test_rgb_mpath']).format(scene_id)))
return noof_imgs
def load_scenes(scene_id, eval_args, depth=False):
dataset_name = eval_args.get('DATA','DATASET')
cam_type = eval_args.get('DATA','CAM_TYPE')
p = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
cam_p = inout.load_cam_params(p['cam_params_path'])
noof_imgs = noof_scene_views(scene_id, eval_args)
if depth:
imgs = np.empty((noof_imgs,) + p['test_im_size'][::-1], dtype=np.float32)
for view_id in range(noof_imgs):
depth_path = p['test_depth_mpath'].format(scene_id, view_id)
try:
imgs[view_id,...] = inout.load_depth2(depth_path) * cam_p['depth_scale']
except:
print((depth_path,' not found'))
else:
print(((noof_imgs,) + p['test_im_size'][::-1] + (3,)))
imgs = np.empty((noof_imgs,) + p['test_im_size'][::-1] + (3,), dtype=np.uint8)
print(noof_imgs)
for view_id in range(noof_imgs):
img_path = p['test_rgb_mpath'].format(scene_id, view_id)
try:
imgs[view_id,...] = cv2.imread(img_path)
except:
print((img_path,' not found'))
return imgs
# def generate_masks(scene_id, eval_args):
# dataset_name = eval_args.get('DATA','DATASET')
# cam_type = eval_args.get('DATA','CAM_TYPE')
# p = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
# for scene_id in range(1,21):
# noof_imgs = noof_scene_views(scene_id, eval_args)
# gts = inout.load_gt(dataset_params['scene_gt_mpath'].format(scene_id))
# for view_gt in gts:
# for gt in view_gt:
def get_all_scenes_for_obj(eval_args):
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
dataset_path = u.get_dataset_path(workspace_path)
dataset_name = eval_args.get('DATA','DATASET')
cam_type = eval_args.get('DATA','CAM_TYPE')
try:
obj_id = eval_args.getint('DATA', 'OBJ_ID')
except:
obj_id = eval(eval_args.get('DATA', 'OBJECTS'))[0]
cfg_string = str(dataset_name)
current_config_hash = hashlib.md5(cfg_string).hexdigest()
current_file_name = os.path.join(dataset_path, current_config_hash + '.npy')
if os.path.exists(current_file_name):
obj_scene_dict = np.load(current_file_name).item()
else:
p = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
obj_scene_dict = {}
scene_gts = []
for scene_id in range(1,p['scene_count']+1):
print(scene_id)
scene_gts.append(inout.load_yaml(p['scene_gt_mpath'].format(scene_id)))
for obj in range(1,p['obj_count']+1):
eval_scenes = set()
for scene_i,scene_gt in enumerate(scene_gts):
for view_gt in scene_gt[0]:
if view_gt['obj_id'] == obj:
eval_scenes.add(scene_i+1)
obj_scene_dict[obj] = list(eval_scenes)
np.save(current_file_name,obj_scene_dict)
eval_scenes = obj_scene_dict[obj_id]
return eval_scenes
def select_img_crops(crop_candidates, test_crops_depth, bbs, bb_scores, visibs, eval_args):
estimate_bbs = eval_args.getboolean('BBOXES', 'ESTIMATE_BBS')
single_instance = eval_args.getboolean('BBOXES', 'SINGLE_INSTANCE')
icp = eval_args.getboolean('EVALUATION', 'ICP')
if single_instance and estimate_bbs:
idcs = np.array([np.argmax(bb_scores)])
elif single_instance and not estimate_bbs:
        idcs = np.array([np.argmax(visibs)])
#!/usr/bin/env python
'''
Fetches invoices based on filenames from a text file and proceeds
to crop out each gold field's content based on it's textbox, saving
all results into a "crops" directory as .PNG files.
The gold standard text, field type and even the bounding box coordinates
for each crop out are stored in the .PNG's metadata under the keys "text",
"labeltype" and "bbox".
^^^
This would have been the original functionality, but this is a "light" version, only operating with synthetic
data. All the real data infrastructure has been removed.
'''
from shutil import copyfile
import sys
from random import choice as oldchoice
from random import randint as randint
import traceback
import itertools
from collections import defaultdict
from PIL import Image, ImageDraw, ImageFont
from PIL import PngImagePlugin
import numpy as np
from sklearn.utils import check_random_state
import cairocffi as cairo
from scipy import ndimage
from fakestrings import randomstring
from scipy.ndimage.filters import gaussian_filter
from numpy import *
def weighted_choice(choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
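# Usage sketch: weighted_choice([('a', 0.7), ('b', 0.3)]) returns 'a' with
# probability 0.7; the weights need not sum to 1.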
def speckle(img):
severity = np.random.uniform(0, 0.3)
    blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
import time
import os
import datetime
import pathlib
import json
import cv2
import carla
from leaderboard.autoagents import autonomous_agent
from team_code.planner import RoutePlanner
import numpy as np
from PIL import Image, ImageDraw
SAVE_PATH = os.environ.get('SAVE_PATH', None)
class BaseAgent(autonomous_agent.AutonomousAgent):
def setup(self, path_to_conf_file):
self.track = autonomous_agent.Track.SENSORS
self.config_path = path_to_conf_file
self.step = -1
self.wall_start = time.time()
self.initialized = False
self._sensor_data = {
'width': 400,
'height': 300,
'fov': 100
}
self._3d_bb_distance = 50
self.weather_id = None
self.save_path = None
if SAVE_PATH is not None:
now = datetime.datetime.now()
string = pathlib.Path(os.environ['ROUTES']).stem + '_'
string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))
print (string)
self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string
self.save_path.mkdir(parents=True, exist_ok=False)
for sensor in self.sensors():
                if 'save' in sensor and sensor['save']:
(self.save_path / sensor['id']).mkdir()
(self.save_path / '3d_bbs').mkdir(parents=True, exist_ok=True)
(self.save_path / 'affordances').mkdir(parents=True, exist_ok=True)
(self.save_path / 'measurements').mkdir(parents=True, exist_ok=True)
(self.save_path / 'lidar').mkdir(parents=True, exist_ok=True)
(self.save_path / 'semantic_lidar').mkdir(parents=True, exist_ok=True)
(self.save_path / 'topdown').mkdir(parents=True, exist_ok=True)
for pos in ['front', 'left', 'right', 'rear']:
for sensor_type in ['rgb', 'seg', 'depth', '2d_bbs']:
name = sensor_type + '_' + pos
(self.save_path / name).mkdir()
def _init(self):
self._command_planner = RoutePlanner(7.5, 25.0, 257)
self._command_planner.set_route(self._global_plan, True)
self.initialized = True
self._sensor_data['calibration'] = self._get_camera_to_car_calibration(self._sensor_data)
self._sensors = self.sensor_interface._sensors_objects
def _get_position(self, tick_data):
gps = tick_data['gps']
gps = (gps - self._command_planner.mean) * self._command_planner.scale
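        # The raw GNSS (lat, lon) fix is mapped into the route planner's local
        # metric frame: subtract the planner's mean, then scale to metres.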
return gps
def sensors(self):
return [
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_front'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_front'
},
{
'type': 'sensor.camera.depth',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_front'
},
{
'type': 'sensor.camera.rgb',
'x': -1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_rear'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': -1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_rear'
},
{
'type': 'sensor.camera.depth',
'x': -1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_rear'
},
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_left'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_left'
},
{
'type': 'sensor.camera.depth',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_left'
},
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_right'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_right'
},
{
'type': 'sensor.camera.depth',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_right'
},
{
'type': 'sensor.lidar.ray_cast',
'x': 1.3, 'y': 0.0, 'z': 2.5,
'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,
'id': 'lidar'
},
{
'type': 'sensor.lidar.ray_cast_semantic',
'x': 1.3, 'y': 0.0, 'z': 2.5,
'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,
'id': 'semantic_lidar'
},
{
'type': 'sensor.other.imu',
'x': 0.0, 'y': 0.0, 'z': 0.0,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'sensor_tick': 0.05,
'id': 'imu'
},
{
'type': 'sensor.other.gnss',
'x': 0.0, 'y': 0.0, 'z': 0.0,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'sensor_tick': 0.01,
'id': 'gps'
},
{
'type': 'sensor.speedometer',
'reading_frequency': 20,
'id': 'speed'
}
]
def tick(self, input_data):
self.step += 1
affordances = self._get_affordances()
traffic_lights = self._find_obstacle('*traffic_light*')
stop_signs = self._find_obstacle('*stop*')
depth = {}
seg = {}
bb_3d = self._get_3d_bbs(max_distance=self._3d_bb_distance)
bb_2d = {}
for pos in ['front', 'left', 'right', 'rear']:
seg_cam = 'seg_' + pos
depth_cam = 'depth_' + pos
_segmentation = np.copy(input_data[seg_cam][1][:, :, 2])
depth[pos] = self._get_depth(input_data[depth_cam][1][:, :, :3])
self._change_seg_tl(_segmentation, depth[pos], traffic_lights)
self._change_seg_stop(_segmentation, depth[pos], stop_signs, seg_cam)
bb_2d[pos] = self._get_2d_bbs(seg_cam, affordances, bb_3d, _segmentation)
#self._draw_2d_bbs(_segmentation, bb_2d[pos])
seg[pos] = _segmentation
rgb_front = cv2.cvtColor(input_data['rgb_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
gps = input_data['gps'][1][:2]
speed = input_data['speed'][1]['speed']
compass = input_data['imu'][1][-1]
depth_front = cv2.cvtColor(input_data['depth_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
depth_left = cv2.cvtColor(input_data['depth_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
depth_right = cv2.cvtColor(input_data['depth_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
depth_rear = cv2.cvtColor(input_data['depth_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
weather = self._weather_to_dict(self._world.get_weather())
return {
'rgb_front': rgb_front,
'seg_front': seg['front'],
'depth_front': depth_front,
'2d_bbs_front': bb_2d['front'],
'rgb_rear': rgb_rear,
'seg_rear': seg['rear'],
'depth_rear': depth_rear,
'2d_bbs_rear': bb_2d['rear'],
'rgb_left': rgb_left,
'seg_left': seg['left'],
'depth_left': depth_left,
'2d_bbs_left': bb_2d['left'],
'rgb_right': rgb_right,
'seg_right': seg['right'],
'depth_right': depth_right,
'2d_bbs_right': bb_2d['right'],
'lidar' : input_data['lidar'][1],
'semantic_lidar': input_data['semantic_lidar'][1],
'gps': gps,
'speed': speed,
'compass': compass,
'weather': weather,
'affordances': affordances,
'3d_bbs': bb_3d
}
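    def _get_depth_sketch(self, data):
        """Hypothetical helper, not part of the original file: a minimal sketch of
        the standard CARLA depth decoding that the _get_depth call in tick() is
        assumed to perform. CARLA encodes depth across the three color channels;
        the normalized value is scaled by 1000.0 to obtain meters. Kept under a
        distinct name so it does not clash with the real implementation defined
        elsewhere in this file."""
        data = data.astype(np.float64)
        normalized = (data[:, :, 0] + data[:, :, 1] * 256 + data[:, :, 2] * 256 * 256) / (256 ** 3 - 1)
        return 1000.0 * normalized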
def save(self, near_node, far_node, near_command, steer, throttle, brake, target_speed, tick_data):
frame = self.step // 10
pos = self._get_position(tick_data)
theta = tick_data['compass']
speed = tick_data['speed']
weather = tick_data['weather']
data = {
'x': pos[0],
'y': pos[1],
'theta': theta,
'speed': speed,
'target_speed': target_speed,
'x_command': far_node[0],
'y_command': far_node[1],
'command': near_command.value,
'steer': steer,
'throttle': throttle,
'brake': brake,
'weather': weather,
'weather_id': self.weather_id,
'near_node_x': near_node[0],
'near_node_y': near_node[1],
'far_node_x': far_node[0],
'far_node_y': far_node[1],
'is_vehicle_present': self.is_vehicle_present,
'is_pedestrian_present': self.is_pedestrian_present,
'is_red_light_present': self.is_red_light_present,
'is_stop_sign_present': self.is_stop_sign_present,
'should_slow': self.should_slow,
'should_brake': self.should_brake,
'angle': self.angle,
'angle_unnorm': self.angle_unnorm,
'angle_far_unnorm': self.angle_far_unnorm,
}
measurements_file = self.save_path / 'measurements' / ('%04d.json' % frame)
f = open(measurements_file, 'w')
json.dump(data, f, indent=4)
f.close()
for pos in ['front', 'left', 'right', 'rear']:
name = 'rgb_' + pos
Image.fromarray(tick_data[name]).save(self.save_path / name / ('%04d.png' % frame))
for sensor_type in ['seg', 'depth']:
name = sensor_type + '_' + pos
Image.fromarray(tick_data[name]).save(self.save_path / name / ('%04d.png' % frame))
for sensor_type in ['2d_bbs']:
name = sensor_type + '_' + pos
np.save(self.save_path / name / ('%04d.npy' % frame), tick_data[name], allow_pickle=True)
Image.fromarray(tick_data['topdown']).save(self.save_path / 'topdown' / ('%04d.png' % frame))
np.save(self.save_path / 'lidar' / ('%04d.npy' % frame), tick_data['lidar'], allow_pickle=True)
np.save(self.save_path / 'semantic_lidar' / ('%04d.npy' % frame), tick_data['semantic_lidar'], allow_pickle=True)
np.save(self.save_path / '3d_bbs' / ('%04d.npy' % frame), tick_data['3d_bbs'], allow_pickle=True)
np.save(self.save_path / 'affordances' / ('%04d.npy' % frame), tick_data['affordances'], allow_pickle=True)
def _weather_to_dict(self, carla_weather):
weather = {
'cloudiness': carla_weather.cloudiness,
'precipitation': carla_weather.precipitation,
'precipitation_deposits': carla_weather.precipitation_deposits,
'wind_intensity': carla_weather.wind_intensity,
'sun_azimuth_angle': carla_weather.sun_azimuth_angle,
'sun_altitude_angle': carla_weather.sun_altitude_angle,
'fog_density': carla_weather.fog_density,
'fog_distance': carla_weather.fog_distance,
'wetness': carla_weather.wetness,
'fog_falloff': carla_weather.fog_falloff,
}
return weather
def _create_bb_points(self, bb):
"""
Returns 3D bounding box world coordinates.
"""
cords = np.zeros((8, 4))
extent = bb[1]
loc = bb[0]
cords[0, :] = np.array([loc[0] + extent[0], loc[1] + extent[1], loc[2] - extent[2], 1])
cords[1, :] = np.array([loc[0] - extent[0], loc[1] + extent[1], loc[2] - extent[2], 1])
cords[2, :] = np.array([loc[0] - extent[0], loc[1] - extent[1], loc[2] - extent[2], 1])
cords[3, :] = np.array([loc[0] + extent[0], loc[1] - extent[1], loc[2] - extent[2], 1])
cords[4, :] = np.array([loc[0] + extent[0], loc[1] + extent[1], loc[2] + extent[2], 1])
cords[5, :] = np.array([loc[0] - extent[0], loc[1] + extent[1], loc[2] + extent[2], 1])
cords[6, :] = np.array([loc[0] - extent[0], loc[1] - extent[1], loc[2] + extent[2], 1])
cords[7, :] = np.array([loc[0] + extent[0], loc[1] - extent[1], loc[2] + extent[2], 1])
return cords
def _translate_tl_state(self, state):
if state == carla.TrafficLightState.Red:
return 0
elif state == carla.TrafficLightState.Yellow:
return 1
elif state == carla.TrafficLightState.Green:
return 2
elif state == carla.TrafficLightState.Off:
return 3
elif state == carla.TrafficLightState.Unknown:
return 4
else:
return None
def _get_affordances(self):
# affordance tl
affordances = {}
affordances["traffic_light"] = None
affecting = self._vehicle.get_traffic_light()
if affecting is not None:
for light in self._traffic_lights:
if light.id == affecting.id:
affordances["traffic_light"] = self._translate_tl_state(self._vehicle.get_traffic_light_state())
affordances["stop_sign"] = self._affected_by_stop
return affordances
def _get_3d_bbs(self, max_distance=50):
bounding_boxes = {
"traffic_lights": [],
"stop_signs": [],
"vehicles": [],
"pedestrians": []
}
bounding_boxes['traffic_lights'] = self._find_obstacle_3dbb('*traffic_light*', max_distance)
bounding_boxes['stop_signs'] = self._find_obstacle_3dbb('*stop*', max_distance)
bounding_boxes['vehicles'] = self._find_obstacle_3dbb('*vehicle*', max_distance)
bounding_boxes['pedestrians'] = self._find_obstacle_3dbb('*walker*', max_distance)
return bounding_boxes
def _get_2d_bbs(self, seg_cam, affordances, bb_3d, seg_img):
"""Returns a dict of all 2d boundingboxes given a camera position, affordances and 3d bbs
Args:
seg_cam ([type]): [description]
affordances ([type]): [description]
bb_3d ([type]): [description]
Returns:
[type]: [description]
"""
bounding_boxes = {
"traffic_light": list(),
"stop_sign": list(),
"vehicles": list(),
"pedestrians": list()
}
if affordances['stop_sign']:
baseline = self._get_2d_bb_baseline(self._target_stop_sign)
bb = self._baseline_to_box(baseline, seg_cam)
if bb is not None:
bounding_boxes["stop_sign"].append(bb)
if affordances['traffic_light'] is not None:
baseline = self._get_2d_bb_baseline(self._vehicle.get_traffic_light(), distance=8)
tl_bb = self._baseline_to_box(baseline, seg_cam, height=.5)
if tl_bb is not None:
bounding_boxes["traffic_light"].append({
"bb": tl_bb,
"state": self._translate_tl_state(self._vehicle.get_traffic_light_state())
})
for vehicle in bb_3d["vehicles"]:
trig_loc_world = self._create_bb_points(vehicle).T
cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(seg_cam), False)
cords_x_y_z = np.array(cords_x_y_z)[:3, :]
veh_bb = self._coords_to_2d_bb(cords_x_y_z)
if veh_bb is not None:
if np.any(seg_img[veh_bb[0][1]:veh_bb[1][1],veh_bb[0][0]:veh_bb[1][0]] == 10):
bounding_boxes["vehicles"].append(veh_bb)
for pedestrian in bb_3d["pedestrians"]:
trig_loc_world = self._create_bb_points(pedestrian).T
cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(seg_cam), False)
cords_x_y_z = np.array(cords_x_y_z)[:3, :]
ped_bb = self._coords_to_2d_bb(cords_x_y_z)
if ped_bb is not None:
if np.any(seg_img[ped_bb[0][1]:ped_bb[1][1],ped_bb[0][0]:ped_bb[1][0]] == 4):
bounding_boxes["pedestrians"].append(ped_bb)
return bounding_boxes
def _draw_2d_bbs(self, seg_img, bbs):
"""For debugging only
Args:
seg_img ([type]): [description]
bbs ([type]): [description]
"""
for bb_type in bbs:
_region = np.zeros(seg_img.shape)
if bb_type == "traffic_light":
for bb in bbs[bb_type]:
_region = np.zeros(seg_img.shape)
box = bb['bb']
_region[box[0][1]:box[1][1],box[0][0]:box[1][0]] = 1
seg_img[(_region == 1)] = 23
else:
for bb in bbs[bb_type]:
_region[bb[0][1]:bb[1][1],bb[0][0]:bb[1][0]] = 1
if bb_type == "stop_sign":
seg_img[(_region == 1)] = 26
elif bb_type == "vehicles":
seg_img[(_region == 1)] = 10
elif bb_type == "pedestrians":
seg_img[(_region == 1)] = 4
def _find_obstacle_3dbb(self, obstacle_type, max_distance=50):
"""Returns a list of 3d bounding boxes of type obstacle_type.
If the object does have a bounding box, this is returned. Otherwise a bb
of size 0.5,0.5,2 is returned at the origin of the object.
Args:
obstacle_type (String): Regular expression
max_distance (int, optional): max search distance. Returns all bbs in this radius. Defaults to 50.
Returns:
List: List of Boundingboxes
"""
obst = list()
_actors = self._world.get_actors()
_obstacles = _actors.filter(obstacle_type)
for _obstacle in _obstacles:
distance_to_car = _obstacle.get_transform().location.distance(self._vehicle.get_location())
if 0 < distance_to_car <= max_distance:
if hasattr(_obstacle, 'bounding_box'):
loc = _obstacle.bounding_box.location
_obstacle.get_transform().transform(loc)
extent = _obstacle.bounding_box.extent
_rotation_matrix = self.get_matrix(carla.Transform(carla.Location(0,0,0), _obstacle.get_transform().rotation))
rotated_extent = np.squeeze(np.array((np.array([[extent.x, extent.y, extent.z, 1]]) @ _rotation_matrix)[:3]))
bb = np.array([
[loc.x, loc.y, loc.z],
[rotated_extent[0], rotated_extent[1], rotated_extent[2]]
])
else:
loc = _obstacle.get_transform().location
bb = np.array([
[loc.x, loc.y, loc.z],
[0.5, 0.5, 2]
])
obst.append(bb)
return obst
def _get_2d_bb_baseline(self, obstacle, distance=2, cam='seg_front'):
"""Returns 2 coordinates for the baseline for 2d bbs in world coordinates
(distance behind trigger volume, as seen from camera)
Args:
obstacle (Actor): obstacle with
distance (int, optional): Distance behind trigger volume. Defaults to 2.
Returns:
np.ndarray: Baseline
"""
trigger = obstacle.trigger_volume
bb = self._create_2d_bb_points(trigger)
trig_loc_world = self._trig_to_world(bb, obstacle, trigger)
#self._draw_line(trig_loc_world[:,0], trig_loc_world[:,3], 0.7, color=(0, 255, 255))
cords_x_y_z = np.array(self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam)))
indices = (-cords_x_y_z[0]).argsort()
# check crooked up boxes
if self._get_dist(cords_x_y_z[:,indices[0]],cords_x_y_z[:,indices[1]]) < self._get_dist(cords_x_y_z[:,indices[0]],cords_x_y_z[:,indices[2]]):
cords = cords_x_y_z[:, [indices[0],indices[2]]] + np.array([[distance],[0],[0],[0]])
else:
cords = cords_x_y_z[:, [indices[0],indices[1]]] + np.array([[distance],[0],[0],[0]])
sensor_world_matrix = self.get_matrix(self._get_sensor_position(cam))
baseline = np.dot(sensor_world_matrix, cords)
return baseline
def _baseline_to_box(self, baseline, cam, height=1):
"""Transforms a baseline (in world coords) into a 2d box (in sensor coords)
Args:
baseline ([type]): [description]
cam ([type]): [description]
height (int, optional): Box height. Defaults to 1.
Returns:
[type]: Box in sensor coords
"""
cords_x_y_z = np.array(self._world_to_sensor(baseline, self._get_sensor_position(cam))[:3, :])
cords = np.hstack((cords_x_y_z, np.fliplr(cords_x_y_z + np.array([[0],[0],[height]]))))
return self._coords_to_2d_bb(cords)
def _coords_to_2d_bb(self, cords):
"""Returns coords of a 2d box given points in sensor coords
Args:
cords ([type]): [description]
Returns:
[type]: [description]
"""
cords_y_minus_z_x = np.vstack((cords[1, :], -cords[2, :], cords[0, :]))
bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T
camera_bbox = np.vstack([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]]).T
if np.any(camera_bbox[:,2] > 0):
camera_bbox = np.array(camera_bbox)
_positive_bb = camera_bbox[camera_bbox[:,2] > 0]
min_x = int(np.clip(np.min(_positive_bb[:,0]), 0, self._sensor_data['width']))
min_y = int(np.clip(np.min(_positive_bb[:,1]), 0, self._sensor_data['height']))
max_x = int(np.clip(np.max(_positive_bb[:,0]), 0, self._sensor_data['width']))
max_y = int(np.clip(np.max(_positive_bb[:,1]), 0, self._sensor_data['height']))
return [(min_x,min_y),(max_x,max_y)]
else:
return None
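    @staticmethod
    def _calibration_sketch(width, height, fov):
        """Hypothetical helper, not part of the original file: sketches how the
        pinhole intrinsic matrix stored in self._sensor_data['calibration'] and
        used by _coords_to_2d_bb is typically built from the camera width,
        height and horizontal FOV (in degrees)."""
        calibration = np.identity(3)
        calibration[0, 2] = width / 2.0   # principal point x
        calibration[1, 2] = height / 2.0  # principal point y
        # focal length in pixels derived from the horizontal field of view
        calibration[0, 0] = calibration[1, 1] = width / (2.0 * np.tan(fov * np.pi / 360.0))
        return calibration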
def _change_seg_stop(self, seg_img, depth_img, stop_signs, cam, _region_size=6):
"""Adds a stop class to the segmentation image
Args:
seg_img ([type]): [description]
depth_img ([type]): [description]
stop_signs ([type]): [description]
cam ([type]): [description]
_region_size (int, optional): [description]. Defaults to 6.
"""
for stop in stop_signs:
_dist = self._get_distance(stop.get_transform().location)
_region = np.abs(depth_img - _dist)
seg_img[(_region < _region_size) & (seg_img == 12)] = 26
# lane markings
trigger = stop.trigger_volume
_trig_loc_world = self._trig_to_world(np.array([[0], [0], [0], [1.0]]).T, stop, trigger)
_x = self._world_to_sensor(_trig_loc_world, self._get_sensor_position(cam))[0,0]
if _x > 0: # stop is in front of camera
bb = self._create_2d_bb_points(trigger, 4)
trig_loc_world = self._trig_to_world(bb, stop, trigger)
cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam), True)
#if cords_x_y_z.size:
cords_x_y_z = cords_x_y_z[:3, :]
cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])
bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T
camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)
if np.any(camera_bbox[:,2] > 0):
camera_bbox = np.array(camera_bbox)
polygon = [(camera_bbox[i, 0], camera_bbox[i, 1]) for i in range(len(camera_bbox))]
img = Image.new('L', (self._sensor_data['width'], self._sensor_data['height']), 0)
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
_region = np.array(img)
#seg_img[(_region == 1)] = 27
seg_img[(_region == 1) & (seg_img == 6)] = 27
def _trig_to_world(self, bb, parent, trigger):
"""Transforms the trigger coordinates to world coordinates
Args:
bb ([type]): [description]
parent ([type]): [description]
trigger ([type]): [description]
Returns:
[type]: [description]
"""
bb_transform = carla.Transform(trigger.location)
bb_vehicle_matrix = self.get_matrix(bb_transform)
vehicle_world_matrix = self.get_matrix(parent.get_transform())
bb_world_matrix = vehicle_world_matrix @ bb_vehicle_matrix
world_cords = bb_world_matrix @ bb.T
return world_cords
def _create_2d_bb_points(self, actor_bb, scale_factor=1):
"""
Returns 2D floor bounding box for an actor.
"""
cords = np.zeros((4, 4))
extent = actor_bb.extent
x = extent.x * scale_factor
y = extent.y * scale_factor
z = extent.z * scale_factor
cords[0, :] = np.array([x, y, 0, 1])
cords[1, :] = np.array([-x, y, 0, 1])
        cords[2, :] = np.array([-x, -y, 0, 1])
        cords[3, :] = np.array([x, -y, 0, 1])
        return cords
import numpy as np
from skimage.io import imread, imsave
def pad_images(img, pad_width_ratio=0.1):
    '''
    Pad a 2D (+channel) image with a border of width pad_width_ratio*shape on
    each side. The padded pixel values follow a normal distribution with:
    - mean = mean minus one std of the outer pad_width_ratio layer of the image
    - std = std of the outer pad_width_ratio layer of the image
    Notes:
    - this function is useful when the user wants to study objects touching the boundary
    - images will be saved in the "padded_images" subfolder
    - the user should use this subfolder as input for further analysis
    '''
    _type = img.dtype
    if img.ndim == 2:
        img = np.expand_dims(img, 0)
    shape = img.shape[1:]
    layer_width = (int(shape[0] * pad_width_ratio), int(shape[1] * pad_width_ratio))
    outermask = np.ones(shape)
    outermask[layer_width[0]:-layer_width[0],
              layer_width[1]:-layer_width[1]] = 0
img_padded = []
for i in img:
# compute mean and std of outer layer
img_masked = i * outermask
img_masked[outermask==0] = np.nan
mean = np.nanmean(img_masked)
std = np.nanstd(img_masked)
mean = mean-std
# pad image plane with nans
i_padded = np.pad(i, ((layer_width[0],layer_width[0]),(layer_width[1],layer_width[1])), mode='constant', constant_values=0)
# padding values
padding_values = np.clip(mean+std*np.random.randn(*i_padded.shape),0,None).astype(np.uint16)
padding_values[layer_width[0]:-layer_width[0],
layer_width[1]:-layer_width[1]] = 0
# sum the padding values
i_padded = i_padded+padding_values
# place image plane in full image
img_padded.append(i_padded)
    img_padded = np.array(img_padded)
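    # The source truncates here; a plausible ending (an assumption, not the
    # original code): drop the channel axis added above for 2D inputs and
    # restore the input dtype before returning.
    if img_padded.shape[0] == 1:
        img_padded = img_padded[0]
    return img_padded.astype(_type)

# Minimal usage sketch (illustrative; the file paths are hypothetical):
# img = imread('input_images/cells.tif')
# imsave('padded_images/cells.tif', pad_images(img))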
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common corruptions to images.
Define 15+4 common image corruptions: Gaussian noise, shot noise, impulse_noise,
defocus blur, frosted glass blur, zoom blur, fog, brightness, contrast, elastic,
pixelate, jpeg compression, frost, snow, and motion blur.
4 extra corruptions: gaussian blur, saturate, spatter, and speckle noise.
"""
import io
import subprocess
import tempfile
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
# To be populated by download_manager
FROST_FILENAMES = []
def _imagemagick_bin():
return 'imagemagick' # pylint: disable=unreachable
# /////////////// Corruption Helpers ///////////////
def around_and_astype(x):
"""Round a numpy array, and convert to uint8.
Args:
x: numpy array.
Returns:
numpy array with dtype uint8.
"""
return np.around(x).astype(np.uint8)
def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur)
def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize,
0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
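# A minimal sketch (not part of the original file) of how plasma_fractal is
# commonly combined with an image to build the fog corruption. The constants
# are illustrative assumptions, not the dataset's exact severity table, and the
# input is assumed to be an HxWxC uint8 image with side length <= the default
# mapsize of 512.
def fog_sketch(x, fog_strength=1.5, wibbledecay=2.0):
  """Blend a diamond-square heightmap into the image as fog."""
  x = np.array(x) / 255.
  max_val = x.max()
  h, w = x.shape[0], x.shape[1]
  x = x + fog_strength * plasma_fractal(wibbledecay=wibbledecay)[:h, :w][..., np.newaxis]
  x_clip = np.clip(x * max_val / (x.max() + fog_strength), 0, 1) * 255
  return around_and_astype(x_clip)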
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
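# Usage sketch (illustrative, not part of the original file): corrupt an image
# at severity 3; the output is uint8 in [0, 255] with the same shape.
# img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
# noisy = gaussian_noise(img, severity=3)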
def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip)
def glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2),
(1.5, 4, 2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip)
def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
  out = np.zeros_like(x)
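  # The source truncates here; the remainder below follows the reference
  # implementation of this corruption (Hendrycks & Dietterich, 2019) and is an
  # assumption, not the original lines: accumulate the clipped zooms and
  # average them with the original image.
  for zoom_factor in c:
    out += clipped_zoom(x, zoom_factor)
  x = (x + out) / (len(c) + 1)
  x_clip = np.clip(x, 0, 1) * 255
  return around_and_astype(x_clip)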
#!/usr/bin/python
#-*- coding: utf-8 -*
# SAMPLE FOR SIMPLE CONTROL LOOP TO IMPLEMENT BAXTER_CONTROL MPC ALGORITHMS
"""
MPC sample tracking for Baxter's right limb with specific references.
Authors: <NAME> and <NAME>.
"""
# Built-int imports
import time
import random
# Own imports
import baxter_essentials.baxter_class as bc
import baxter_essentials.transformation as transf
import baxter_control.mpc_controller as b_mpc
# General module imports
import numpy as np
import matplotlib.pyplot as plt
def create_plots(iteration_vector, x_matrix, u_matrix, sample_time, title, name1, name2):
"""
Create simple simulation plots based on vectors
It returns two pop-out matplotlib graphs.
"""
# Define a figure for the creation of the plot
figure_1, all_axes = plt.subplots(x_matrix.shape[0], 1)
current_axes = 0
for axes_i in all_axes:
# Generate the plot and its axes for each Xi and Ui.
axes_i.plot(iteration_vector,
x_matrix[current_axes, :].T, 'b', linewidth=1)
axes_i.plot(iteration_vector,
u_matrix[current_axes, :].T, 'g', linewidth=1)
current_axes = current_axes + 1
# Customize figure with the specific "x"-"y" labels
if (current_axes <= 3):
if (name1 == "x"):
axes_i.set_ylabel("[rad]")
else:
axes_i.set_ylabel("[m]")
else:
axes_i.set_ylabel("[rad]")
# Add labels to each subplot
axes_i.legend(["{}{}".format(name1, current_axes),
"{}{}".format(name2, current_axes)])
# Remove inner axes layers (only leave the outer ones)
axes_i.label_outer()
# Add personalized text to each subplot (at lower-right side)
axes_i.text(0.98,
0.02,
'SGA-EJGG',
verticalalignment='bottom',
horizontalalignment='right',
transform=axes_i.transAxes,
color='black',
fontsize=6
)
# Add grid
axes_i.grid(color='black', linestyle='-', alpha=0.2, linewidth=1)
# Change the background color of the external part
figure_1.patch.set_facecolor((0.2, 1, 1))
# Configure plot title and horizontal x-label
all_axes[0].set_title(title)
all_axes[len(
all_axes) - 1].set_xlabel("Iterations [k] (Ts={} seconds)".format(sample_time))
def calculate_cartesian_vectors(current_thetas):
# CURRENT CARTESIAN POSITION CALCULATIONS...
tm_current = bc.BaxterClass().fpk(current_thetas, "right", 7)
current_position = tm_current[0:3, 3]
current_orientation = transf.Transformation(
0, 0, 0, [0, 0, 0]).get_fixed_angles_from_tm(tm_current)
return np.concatenate([current_position, current_orientation], axis=0).reshape(6, 1)
def test_1_step_response_without_feedback(show_results=True):
"""
Sample loop to plot step response with constant change in each DOF without
any control algorithm (just to see Baxter's "chaos" response)
"""
# Variables for simulation
x_k = np.matrix([[0.1], [0.15], [0.2], [0.25], [0.3], [0.35], [0.4]])
u_k = np.matrix([[0.01], [0.01], [0.01], [0.01], [0.01], [0.01], [0.01]])
# Desired cartesian goal [x_g, y_g, z_g, x_angle_g, y_angle_g, z_angle_g]
# (NOT any goal, just to be able to plot)
cartesian_goal = np.array([0, 0, 0, 0, 0, 0]).reshape(6, 1)
iteration_vector = list()
x_matrix = np.zeros((x_k.shape[0], 0))
u_matrix = np.zeros((u_k.shape[0], 0))
cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
total_time_in_seconds = 5
sample_time_in_seconds = 0.01
final_time = time.time() + total_time_in_seconds
last_time = 0
iteration = 0
while (time.time() < final_time):
if (time.time() - last_time >= sample_time_in_seconds):
last_time = time.time()
iteration_vector.append(iteration)
if (show_results == True):
print("Iteration (k): ", iteration)
iteration = iteration + 1
x_k_plus_1 = x_k + u_k
x_k = x_k_plus_1
cartesian_k = calculate_cartesian_vectors(x_k)
x_matrix = np.hstack((x_matrix, x_k_plus_1))
u_matrix = np.hstack((u_matrix, u_k))
cartesian_matrix = np.hstack((cartesian_matrix, cartesian_k))
cartesian_goal_matrix = np.hstack(
(cartesian_goal_matrix, cartesian_goal))
if (show_results == True):
print("iteration_vector:")
print(iteration_vector)
print("len(iteration_vector):")
print(len(iteration_vector))
print("u_matrix:")
print(u_matrix)
print("x_matrix:")
print(x_matrix)
print("x_matrix.shape:")
print(x_matrix.shape)
create_plots(
iteration_vector,
cartesian_matrix,
cartesian_goal_matrix,
sample_time_in_seconds,
"Cartesian Values responses based on step respone no feedback",
"current",
"fake-goal"
)
create_plots(
iteration_vector,
x_matrix,
u_matrix,
sample_time_in_seconds,
"X and U vectors response based on step respone no feedback",
"x",
"u"
)
plt.show()
def test_2_mpc_first_attempt(show_results=True):
"""
    Sample control loop to test the MPC algorithm on Baxter's right limb for custom
    variables such as N, total_time, sample_time, cartesian_goal, x0, u0 and
    validate the resulting plots with or without noise.
"""
# Main conditions for executing the control loop with MPC algorithm
N = 1 # Prediction horizon
total_time_in_seconds = 20
sample_time_in_seconds = 0.1
# Initial conditions for states and inputs
x0 = np.array(
[
0.39500005288049406,
-1.2831749290661485,
-0.18867963690990588,
2.5905100555414924,
-0.11428156869746332,
-1.3506700837331067,
0.11504855909140603
]
).reshape(7, 1)
u0 = np.array([0, 0, 0, 0, 0, 0, 0]).reshape(7, 1)
# Number inputs (same as number of degrees of freedom)
nu = u0.shape[0]
# Initial cartesian_goal "default" value
cartesian_goal = np.array(
[
[
-0.9,
-1.0,
1.1,
0.6660425877100662,
1.5192944057794895,
-1.3616725381467032
],
] * N
).transpose().reshape(6, N)
# ---------- Main Control loop -------------
# Variables for control loop
x_k = x0
u_k = u0
iteration_vector = list()
x_matrix = np.zeros((x_k.shape[0], 0))
u_matrix = np.zeros((u_k.shape[0], 0))
cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
    cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
import math
import itertools
from sklearn.cluster import DBSCAN
import numpy as np
import pysc2.agents.myAgent.myAgent_13_BIC_DQN.smart_actions as sa
from pysc2.agents.myAgent.myAgent_13_BIC_DQN.config import config
from pysc2.agents.myAgent.myAgent_13_BIC_DQN.tools import unit_list
from pysc2.lib import features
# Convert a decimal number to an arbitrary base; used to parse the action list
def transport(action_number, action_dim):
result = np.zeros(config.MY_UNIT_NUMBER)
an = action_number
ad = action_dim
for i in range(config.MY_UNIT_NUMBER):
if an / ad != 0:
result[config.MY_UNIT_NUMBER - i - 1] = int(an % ad)
an = int(an / ad)
else:
break
return result
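# Worked example (illustrative; assumes config.MY_UNIT_NUMBER == 3):
# transport(5, 2) peels base-2 digits off the least significant end and fills
# the result right-to-left, giving array([1., 0., 1.]) since 5 = 101 in binary.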
def computeDistance(unit, enemy_unit):
x_difference = math.pow(unit.x - enemy_unit.x, 2)
y_difference = math.pow(unit.y - enemy_unit.y, 2)
distance = math.sqrt(x_difference + y_difference)
return distance
def computeDistance_center(unit):
x_difference = math.pow(unit.x - config.MAP_SIZE / 2, 2)
y_difference = math.pow(unit.y - config.MAP_SIZE / 2, 2)
distance = math.sqrt(x_difference + y_difference)
return distance
def get_bound(init_obs, obs):
bounds = []
init_my_units_tag = [unit.tag for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
init_enemy_units_tag = [unit.tag for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
for i in range(config.MY_UNIT_NUMBER):
bound = []
my_unit = find_unit_by_tag(obs, init_my_units_tag[i])
if my_unit is None:
bound.append(1)
for j in range(config.ATTACT_CONTROLLER_ACTIONDIM - config.DEATH_ACTION_DIM):
bound.append(0)
bounds.append(bound)
continue
else:
bound.append(0)
for j in range(config.STATIC_ACTION_DIM):
bound.append(1)
for j in range(config.ENEMY_UNIT_NUMBER):
enemy = find_unit_by_tag(obs, init_enemy_units_tag[j])
if enemy is None:
bound.append(0)
elif computeDistance(my_unit, enemy) >= config.ATTACK_RANGE:
bound.append(0)
else:
bound.append(1)
bounds.append(bound)
return bounds
def find_unit_by_tag(obs, tag):
for unit in obs.observation['raw_units']:
if unit.tag == tag:
return unit
return None
############################################
def assembly_action(init_obs, obs, action_numbers):
actions = []
init_my_units = [unit for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
init_enemy_units = [unit for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
controller = sa.attack_controller
# action_nmbers = transport(action_number, config.ATTACT_CONTROLLER_ACTIONDIM)
for i in range(config.MY_UNIT_NUMBER):
parameter = []
if action_numbers[i] == 0:
continue
elif 0 < action_numbers[i] <= 4:
my_unit = find_unit_by_tag(obs, init_my_units[i].tag)
a = controller[1]
dir = action_numbers[i] - config.DEATH_ACTION_DIM
parameter.append(0)
parameter.append(init_my_units[i].tag)
if dir == 0:
parameter.append((min([my_unit.x + 2, config.MAP_SIZE]), min([my_unit.y + 2, config.MAP_SIZE])))
elif dir == 1:
parameter.append((max([my_unit.x - 2, 0]), max([my_unit.y - 2, 0])))
elif dir == 2:
parameter.append((min([my_unit.x + 2, config.MAP_SIZE]), max([my_unit.y - 2, 0])))
elif dir == 3:
parameter.append((max([my_unit.x - 2, 0]), min([my_unit.y + 2, config.MAP_SIZE])))
parameter = tuple(parameter)
actions.append(a(*parameter))
elif 4 < action_numbers[i] <= 4 + config.ENEMY_UNIT_NUMBER:
my_unit = find_unit_by_tag(obs, init_my_units[i].tag)
a = controller[2]
enemy = int(action_numbers[i] - config.DEATH_ACTION_DIM - config.STATIC_ACTION_DIM)
parameter.append(0)
parameter.append(my_unit.tag)
parameter.append(init_enemy_units[enemy].tag)
parameter = tuple(parameter)
actions.append(a(*parameter))
return actions
def get_agent_state(unit):
states = np.array([])
states = np.append(states, computeDistance_center(unit))
states = np.append(states, unit.alliance)
states = np.append(states, unit.unit_type)
states = np.append(states, unit.x)
states = np.append(states, unit.y)
states = np.append(states, unit.health)
states = np.append(states, unit.shield)
states = np.append(states, unit.weapon_cooldown)
return states
def get_state(init_obs, obs):
state = np.array([])
init_my_units_tag = [unit.tag for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
init_enemy_units_tag = [unit.tag for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
for i in range(config.MY_UNIT_NUMBER):
my_unit = find_unit_by_tag(obs, init_my_units_tag[i])
if my_unit is not None:
my_unit_state = get_agent_state(my_unit)
state = np.append(state, my_unit_state)
else:
state = np.append(state, np.zeros(config.COOP_AGENT_OBDIM))
for i in range(config.ENEMY_UNIT_NUMBER):
enemy_unit = find_unit_by_tag(obs, init_enemy_units_tag[i])
if enemy_unit is not None:
my_unit_state = get_agent_state(enemy_unit)
state = np.append(state, my_unit_state)
else:
state = np.append(state, np.zeros(config.COOP_AGENT_OBDIM))
return state
def get_agents_obs(init_obs, obs):
agents_obs = []
init_my_units_tag = [unit.tag for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
init_enemy_units_tag = [unit.tag for unit in init_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
for i in range(config.MY_UNIT_NUMBER):
        # Look up this friendly unit's information once
agent_obs = np.array([])
my_unit = find_unit_by_tag(obs, init_my_units_tag[i])
if my_unit is None:
            # The friendly unit is dead at this point, so its observation is all zeros
agent_obs = np.zeros(config.COOP_AGENTS_OBDIM)
agents_obs.append(agent_obs)
continue
for j in range(config.MY_UNIT_NUMBER):
my_target_unit = find_unit_by_tag(obs, init_my_units_tag[j])
            # Iterate over each friendly unit's information in order
if my_target_unit is None or computeDistance(my_unit, my_target_unit) >= config.OB_RANGE:
agent_obs = np.append(agent_obs, np.zeros(8))
else:
agent_obs = np.append(agent_obs, computeDistance(my_unit, my_target_unit))
agent_obs = np.append(agent_obs, my_target_unit.alliance)
agent_obs = np.append(agent_obs, my_target_unit.unit_type)
agent_obs = np.append(agent_obs, my_target_unit.x)
agent_obs = np.append(agent_obs, my_target_unit.y)
agent_obs = np.append(agent_obs, my_target_unit.health)
agent_obs = np.append(agent_obs, my_target_unit.shield)
agent_obs = np.append(agent_obs, my_target_unit.weapon_cooldown)
for j in range(config.ENEMY_UNIT_NUMBER):
enemy_target_unit = find_unit_by_tag(obs, init_enemy_units_tag[j])
            # Iterate over each enemy unit's information in order
if enemy_target_unit is None or computeDistance(my_unit, enemy_target_unit) >= config.OB_RANGE:
agent_obs = np.append(agent_obs, np.zeros(8))
else:
agent_obs = np.append(agent_obs, computeDistance(my_unit, enemy_target_unit))
agent_obs = np.append(agent_obs, enemy_target_unit.alliance)
agent_obs = np.append(agent_obs, enemy_target_unit.unit_type)
agent_obs = np.append(agent_obs, enemy_target_unit.x)
agent_obs = np.append(agent_obs, enemy_target_unit.y)
agent_obs = np.append(agent_obs, enemy_target_unit.health)
agent_obs = np.append(agent_obs, enemy_target_unit.shield)
agent_obs = np.append(agent_obs, enemy_target_unit.weapon_cooldown)
agents_obs.append(agent_obs)
return agents_obs
def get_reward(obs, pre_obs):
reward = 0
my_units_health = [unit.health for unit in obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
enemy_units_health = [unit.health for unit in obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
# reward = len(my_units_health) / (len(my_units_health) + len(enemy_units_health))
my_units_health_pre = [unit.health for unit in pre_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
enemy_units_health_pre = [unit.health for unit in pre_obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
if len(enemy_units_health) == 0:
reward = sum(my_units_health) + 200
return reward / 200
if len(my_units_health) == 0:
reward = -sum(my_units_health) - 200
return reward / 200
if len(my_units_health) < len(my_units_health_pre):
reward -= (len(my_units_health_pre) - len(my_units_health)) * 10
if len(enemy_units_health) < len(enemy_units_health_pre):
reward += (len(enemy_units_health_pre) - len(enemy_units_health)) * 10
reward += (sum(my_units_health) - sum(my_units_health_pre)) / 2 - (sum(enemy_units_health) - sum(enemy_units_health_pre))
return float(reward) / 200
def win_or_loss(obs):
if obs.last():
# my_units = [unit for unit in obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.SELF]
enemy_units = [unit for unit in obs.observation['raw_units'] if unit.alliance == features.PlayerRelative.ENEMY]
if len(enemy_units) == 0:
return 1
else:
return -1
else:
return 0
############test############
def get_bound_test(my_units, enemy_units):
bound = np.zeros(np.power(config.ATTACT_CONTROLLER_ACTIONDIM, config.MY_UNIT_NUMBER))
    legal_actions = []
    for i in range(config.MY_UNIT_NUMBER):
        if i >= len(my_units):
            legal_actions.append([0])
            continue
        else:
            action = []
            action.append(0)
            for j in range(config.ENEMY_UNIT_NUMBER):
                if j >= len(enemy_units):
                    action.append(0)
                else:
                    action.append(1)
            legal_actions.append(list(np.nonzero(action)[0]))
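    # The source truncates here; a plausible ending (an assumption, not the
    # original code): enumerate every joint action that is legal for all units
    # and mark the corresponding flat index in `bound`.
    for combo in itertools.product(*legal_actions):
        index = 0
        for a in combo:
            index = index * config.ATTACT_CONTROLLER_ACTIONDIM + int(a)
        bound[index] = 1
    return bound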
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 08 17:46:45 2017
@author: apfranco
"""
import numpy as np
import scipy
from scipy.optimize import leastsq
def RockPhysicsCalibration(agd, OM):
    # ALGORITHM FOR CALIBRATION OF ROCK PHYSICS MODELS
    #
    # MODELS
    # 1 - neutron porosity:
    # phi = A + B phiE + C vsh or
    # 2 - gamma rays:
    # gr = grmin + (grmax - grmin) vsh
    # 3 - density model:
    # rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh * (1 - phiE);
    # 4 - resistivity:
    # 1/ Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    #
    # GENERAL DESCRIPTION:
    # The program should be run to generate the coefficients and densities described
    # above, to be used in later stages of inference of porosity, clay
    # volume and saturation. The program provides the option of entering known
    # stratigraphic limits, performing a general calibration for the
    # whole package and also in groups separated as a function of shale volume
    # relative to a cutoff value (cutclay). The program offers 3 output
    # options involving calibration over the whole analyzed segment, over smaller
    # segments defined in the input (secHoriz), or over those same smaller
    # segments further subdivided as a function of shale content.
    #
    # INPUT PARAMETERS:
    # log data - gamma rays, porosity, density, VP and VS
    # core data (if available) - clay volume, porosity, density
    # top, bot - upper and lower limits of the section to be analyzed
    # phiSand - porosity of homogeneous sand (zero clay content)
    # grmin, grmax - minimum and maximum values for converting gamma rays into shale volume
    # cutclay - cutoff value for the sand-to-shale transition (grain- to matrix-supported)
    # secHoriz - matrix (nFac x 2) containing the upper and lower limits of each stratigraphic unit
    # satUncert - =0 turns off the calibration selector for horizons with oil.
    # Otherwise iOut must necessarily equal 3
    # iOut - facies detail selector for the parameter output: 1, 2
    # or 3, as explained above.
    # modPhiC - selector for the type of calibration porosity (effective
    # porosity): = 1 neutron porosity log; = 2 independent effective
    # porosity (e.g. core); = 3 effective porosity computed by formula 1 above.
    # NOTE: CAUTION - option modPhiC = 3 needs refinement and should be used only in
    # very specific cases. In general it produces ill-conditioned matrices.
    #
    # OUTPUT PARAMETERS:
    # calibData_wellName - file containing the reference data for the calibration process
    # phiC
    # clayC
    # rhoC
    # resC
    # calibCPR_Vel_wellName - file containing the parameters of Han's linear velocity model
    # facies
    # phiSand
    # neutron
    # denLitho
    # cValuesPhi
    # cValuesChi
    # covMatrixPar
    # coefVP
    # coefVS
    # fluidProp
    # fluidPars
print ("CHAMANDO A FUNCAO EM ALGO")
#Parametros de entrada
inputPars = agd.get_input()
well_uid = agd.get_well_uid()
log_index = OM.list('log', well_uid)[0]
indexes = log_index.get_index()[0]
z = indexes[0].data
    topCL = inputPars.get('topCL', None)  # calibration interval (with water)
    botCL = inputPars.get('botCL', None)
    top = inputPars.get('top', None)  # inference interval
    bot = inputPars.get('bot', None)
indLog = np.argwhere(np.logical_and(z>=top, z<=bot))
indLog = np.squeeze(indLog,1)
    # Input of the pressure logs
press_file = np.loadtxt('U:/bkp_Windows06nov2017/Documents/Pocos_Morena/MA20.prs')
z = z[indLog]
gr = inputPars.get('gr', None )
gr = gr[indLog]
gr = logInterp(gr,z)
phi = inputPars.get('phi', None )
phi = phi[indLog]
phi = logInterp(phi,z)
rhoFull = inputPars.get('rho', None )
rho = rhoFull[indLog]
rho = logInterp(rho,z)
res = inputPars.get('res', None )
res = res[indLog]
    if (np.all(np.isnan(res))):
res = np.empty(np.size(indLog))
else:
res = logInterp(res,z)
fac = inputPars.get('fac', None )
fac = fac[indLog]
fac = np.array(np.floor(fac), dtype=int)
fac = logInterp(fac,z)
    # Input of the pressure logs
zProv = indexes[0].data
mpp = 0.0980665*press_file[:,0]
mtzp = press_file[:,1]
lpres, cpres = np.shape(press_file)
if (cpres == 3):
mmzp = press_file[:,cpres - 1]
else:
mmzp = np.empty([0,0])
nDP = np.size(mtzp)
tvdss = inputPars.get('tvdss', None )
tvdss = tvdss[indLog]
izp = np.empty(nDP, dtype=int)
if (np.size(mmzp) == 0):
indr = indLog
lindr = np.size(indr) - 1
tol = 0.1
for i in range (0, nDP):
indp = np.argwhere(np.logical_and(tvdss <= (mtzp[i] + tol), tvdss >= (mtzp[i] - tol)))
indp= np.squeeze(indp,1)
cizp = np.argwhere(np.logical_and(indp >= indr[0], indp <= indr[lindr]))
cizp= np.squeeze(cizp,1)
if (np.size(cizp) == 0):
izp[i] = np.argmin(np.abs(tvdss - mtzp[i]))
else:
izp[i] = indp[cizp[0]]
mzp = zProv[izp]
matsort = np.concatenate([[mzp],[mpp], [mtzp],[izp]]).T
indsort = np.argsort(matsort[:,0],0)
matsort = np.array([[matsort[indsort,0]],[matsort[indsort,1]],[matsort[indsort,2]],[matsort[indsort,3]]]).T
matsort = np.squeeze(matsort)
mzp = matsort[:,0]
mpp = matsort[:,1]
mtzp = matsort[:,2]
izp = matsort[:,3].astype(int)
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
else:
mzp = mmzp
for i in range (0, nDP):
izp[i] = np.argmin(np.abs(zProv - mzp[i]))
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
phiCore = np.empty([0,0])
secHoriz = np.array([top, bot])
    # Calibration parameters, data and output
    nFac = 4
    modPhiC = 1  # selector for the type of calibration data to be used as effective porosity
    # 1: neutron log  2: effective porosity log
    useCore = 0
    iOut = 2
    # iuseclay = 0  # selector for the type of clay content to be used
    # 0: vsh straight from the log  1: clay (computed from GR)
    # Density parameters
    rhoMin = np.array([2.55, 2.569, 2.623, 2.707])  # there are 4 facies in the reported region
    # Resistivity parameters
    mP = 2.0  # cementation exponent in clean sands: 1.3 (unconsolidated) - 2.0 (consolidated)
    nS = 2.0  # saturation exponent in clean sands: 1.5 - 2.0.
    # It is reduced in the presence of lamination and microporosity
    aT = 0.8  # constant of Archie's equation
    Rw = 0.028  # water resistivity
    Rsh = 2.048  # shale resistivity
resCoef = np.array([[mP, nS, aT*Rw, Rsh], [1.5, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh]])
    # Section on fluid properties and sand and shale matrices
    # Parameters
    # pressure computation
    pres_poros = np.mean(mpp)  # reference pore pressure for the density computation
    temp = 89.0  # temperature in oC
    sal = 102400  # salinity
    RGO = 75.0  # gas-oil ratio
    API = 29.0  # API gravity
    G = 0.835  # specific gravity
    # Order the parameters in the vector for the function call
fluidPars = np.array([pres_poros, temp, sal, RGO, API, G])
    # HERE BEGINS THE CODE secCalibVshPhiRhoRes_vpHan
    # Calibration section
indCL = np.where(np.logical_and(z>=topCL, z<=botCL))
nData = np.size(z)
    # Computation of effective porosity and vsh with estimation of the
    # grmin and grmax values over the whole package covered by the data
    # Transformation of the observed data
    # Shale volume from gamma rays
indSh = np.argwhere(fac==4)
indSh= np.squeeze(indSh,1)
indSd = np.argwhere(fac == 1)
indSd= np.squeeze(indSd,1)
if (np.size(indSh) == 0 and np.size(indSd) == 0):
grmax = np.percentile(gr, 95)
grmin = np.percentile(gr, 5)
else:
grmax = np.percentile(gr[indSh], 95) #146.3745
grmin = np.percentile(gr[indSd], 5) #54.2600
claye = vshGRcalc(gr, grmin, grmax)
    # For now using only modPhiC == 1
if modPhiC == 1:
grlim = grmax
ind = np.where (gr>= grlim)
phiNsh = np.median(phi[ind])
phiEe = np.fmax(0.01, phi - claye*phiNsh)
modPhiC =2
elif (modPhiC == 2 and np.size(phiCore) == 0):
print ("Nao existe a funcao chamada aqui dentro")
#phiEe = phiSd2phiE (zR, claye, phiSand, secHoriz)
elif (modPhiC == 2 and useCore == 1 ):
phiEe = phiCore
#fluidProp matriz com valores para Kf e densidade para fases salmoura,
#oleo e gas, ordenados da seguinte forma:
#bulk_salmoura, bulk_oleo, bulk_gas (modulo variavel com a pressao
#rho_salmoura, rho_oleo, rho_gas (so a densidade sera fixa)
nDP = np.size(mpp)
fluidPropP = np.empty([nDP, 2, 3]) #esqueleto de nDP 'paginas' que guardara
#as matrizes 2x3 de retorno da funcao seismicPropFluids
for i in np.arange(0, nDP):
#atualizar pressao de poro
fluidPars[0] = mpp[i]
fluidPropP[i] = seismicPropFluids(fluidPars)
fluidProp = np.mean(fluidPropP, 0)
rhoFluids = fluidProp[1]
rhoW = rhoFluids[0]
rhoO = rhoFluids[1]
#rock physics model calibration
    # selection of logs only in the water calibration region
phiC = phiEe[indCL]
clayC = claye[indCL]
rhoCL = rho[indCL]
resCL = res[indCL]
phiCL = phi[indCL]
facCL = fac[indCL]
    # Calibration for the whole section
    rhoMin_T = np.median(rhoMin)
opt = 2
if (opt == 1):
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes(phiCL, rhoCL, resCL, clayC, phiC, rhoMin_T, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes2(phiCL, rhoCL, resCL, clayC, phiC , rhoW, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar_T = np.concatenate([[cPhi_T[0]], [cPhi_T[1]], [cPhi_T[2]]])
denPar_T = np.concatenate([[rhoSd], [rhoWe], [rhoO], [rhoSh], [rhoDisp]])
resPar_T = cRes_T
[phiMod_T, rhoMod_T, resMod_T] = calibCPRRreMod(phiEe, claye, phiPar_T , denPar_T, resPar_T, modPhiC)
facies_T = np.ones((nData,1))
phiMod = np.zeros((nData,1))
rhoMod = np.zeros((nData,1))
resMod = np.zeros((nData,1))
phiPar = np.empty([nFac,3])
denPar = np.empty([nFac,5])
resPar = np.empty([nFac,4])
facH = np.zeros([np.size(facCL),1])
for i in range(0,nFac):
ind = np.argwhere(facCL == i + 1)
ind= np.squeeze(ind,1)
secPhi = phiCL[ind]
secRho = rhoCL[ind]
secRes = resCL[ind]
secClayC = clayC[ind]
secPhiC = phiC[ind]
#[cHan,vpMod(ind),s2] = calibHan(secVP,secPhiC,secClayC);
#coefHanVP(i,:) = cHan';
        # the neutron porosity and density part does not use separate
        # grouping and distinct calibration in terms of shale
        # volume. The coefficients are repeated (equal) for sand and shale
resCoef_line = np.empty((resCoef.shape[0],1))
resCoef_line[:,0] = resCoef[i]
if (opt == 1):
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes(secPhi, secRho, secRes, secClayC, secPhiC , rhoMin[i], resCoef_line, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes2(secPhi, secRho, secRes, secClayC, secPhiC , rhoW, resCoef_line, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar[i] = np.array([cPhi[0], cPhi[1], cPhi[2]])
denPar[i] = np.array([rhoSd, rhoWe, rhoO, rhoSh, rhoDisp])
resPar[i] = cRes
facH[ind] = i + 1
resPar_line = np.empty([1,nFac])
resPar_line[0,:] = resPar[i]
ind = np.argwhere(fac == i + 1)
ind= np.squeeze(ind,1)
passArg = np.array([rhoSd, rhoW, rhoSh])
[dataPhi, dataRho, dataRes] = calibCPRRreMod(phiEe[ind], claye[ind], phiPar[i],passArg, resPar_line, modPhiC)
phiMod[ind,0] = dataPhi
rhoMod[ind,0] = dataRho
resMod[ind] = dataRes
if (iOut == 1):
nOutFac = 1
facies = facies_T
neutron = phiPar_T
denLitho = denPar_T
rhoComp = rhoMod_T
phiComp = phiMod_T
resComp = resMod_T
elif (iOut == 2):
nOutFac = np.ones([nFac,1])
facies = facH
neutron = phiPar
denLitho = denPar
denLitho[:,4] = neutron[:,2]
rhoComp = rhoMod
phiComp = phiMod
resComp = resMod
else:
        raise Exception('Output selector must be 1 or 2')
r2Phi = rsquared (phiComp, phi)
r2Rho = rsquared (rhoComp, rho)
r2Res = rsquared (resComp, res)
print ("Fim da calibracao, com seguintes ajustes R2:\n Phi = %7.2f\n RHO = %7.2f\n RES = %7.2f\n" % (r2Phi, r2Rho, r2Res))
    # Data output
def calibClayPhiRhoRes(phi, rho, Rt, vsh, phiE, rhoMin, RtCoef, mode):
""" FINALIDADE: calcular parametros dos modelos de porosidade e densidade
a partir do ajuste dos dados de perfis de porosidade de neutrons e de
densidade, usando informacoes de volume de folhelho e de porosidade efetiva
com 3 opcoes distintas para a porosidade efetiva:
1 - usa o proprio perfil de neutrons como porosidade efetiva (identidade)
2 - usa um perfil independente de porosidade efetiva (ex. testemunho)
ENTRADA:
phi - perfil de neutrons
rho - perfil de densidade
vsh - volume de folhelho (normalmente extraido do perfil de raios gama)
phiE - perfil de porosidade efetiva
rhoMin - densidade media dos graos minerais constituintes da matriz da rocha
RtCoef -
mode - indicador de porosidade efetiva, sendo 1, 2 ou 3 conforme os
casos acima descritos.
SAIDA:
phiPar - parametros de ajuste do modelo de porosidade de neutrons
phiComp - perfil calculado de porosidade de neutrons
rhoPar - parametros de ajuste do modelo de densidade
rhoComp - perfil calculado de densidade
MODELOS
porosidade de neutrons:
phi = A + 1.0 phiE + C vsh
modelo de densidade:
rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
modelo de resistividade:
Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
"""
    if((mode != 1) and (mode != 2) and (mode != 3)):
        raise Exception("Effective porosity input selector must be 1, 2 or 3!")
    n = np.size(vsh)
    if (np.size(phi) != n or np.size(rho) != n):
        raise Exception("Input vectors must have the same dimensions")
    if ((mode == 1 or mode == 2) and np.size(phiE) != n):
        raise Exception("Effective porosity input vector does not have the appropriate dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
rhoPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity, following the
        # model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        # in this case phiE is an effective porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for fitting the neutron porosity
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode ==3):
phiSand = 0.25
        # in this case phiE is an effective porosity vector
col1 = 1 - (phiSand + vsh)
col2 = np.ones(n)*phiSand
A = np.concatenate([[col1], [col2], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for fitting the neutron porosity
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
vecConc = vsh*(1-phiE)
B = np.concatenate([[phiE], [vecConc]])
xRho1 = fitNorm1(B, (rho - rhoMin), 10)
rhoPar[0] = rhoMin
rhoPar[1] = xRho1[0] + rhoMin
rhoPar[2] = xRho1[1] + rhoMin
rhoComp = np.dot(B,xRho1) + rhoMin
    xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0]  # check how it behaves without lb and ub
RtPar = np.concatenate([cRes, xRes])
RtPar = RtPar.reshape(1, RtPar.size)
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
def calibClayPhiRhoRes2(phi, rho, Rt, vsh, phiE, rhoWater, RtCoef, mode):
""" FINALIDADE: calcular parametros dos modelos de porosidade e densidade
a partir do ajuste dos dados de perfis de porosidade de neutrons e de
densidade, usando informacoes de volume de folhelho e de porosidade efetiva
com 3 opcoes distintas para a porosidade efetiva:
1 - usa o proprio perfil de neutrons como porosidade efetiva (identidade)
2 - usa um perfil independente de porosidade efetiva (ex. testemunho)
ENTRADA:
phi - perfil de neutrons
rho - perfil de densidade
vsh - volume de folhelho (normalmente extraido do perfil de raios gama)
phiE - perfil de porosidade efetiva
rhoWater - densidade da agua
RtCoef -
mode - indicador de porosidade efetiva, sendo 1, 2 ou 3 conforme os
casos acima descritos.
SAIDA:
phiPar - parametros de ajuste do modelo de porosidade de neutrons
phiComp - perfil calculado de porosidade de neutrons
rhoPar - parametros de ajuste do modelo de densidade
rhoComp - perfil calculado de densidade
MODELOS
porosidade de neutrons:
phi = A + 1.0 phiE + C vsh
modelo de densidade:
rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
modelo de resistividade:
Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
"""
    if((mode != 1) and (mode != 2) and (mode != 3)):
        raise Exception("Effective porosity input selector must be 1, 2 or 3!")
    n = np.size(vsh)
    if (np.size(phi) != n or np.size(rho) != n):
        raise Exception("Input vectors must have the same dimensions")
    if ((mode == 1 or mode == 2) and np.size(phiE) != n):
        raise Exception("Effective porosity input vector does not have the appropriate dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity, following the
        # model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        # in this case phiE is an effective porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for fitting the neutron porosity
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode ==3):
phiSand = 0.25
        # in this case phiE is an effective porosity vector
col1 = 1 - (phiSand + vsh)
col2 = np.ones(n)*phiSand
A = np.concatenate([[col1], [col2], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for fitting the neutron porosity
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1-phiE)
B = np.concatenate([[col1], [col2]]).T
rhoCte = rhoWater * phiE
xRho = fitNorm1(B, (rho - rhoCte),10)
rhoPar = np.empty(2)
rhoPar[0] = xRho[0]
rhoPar[1] = xRho[1]
rhoComp = np.dot(B, xRho) + rhoCte
xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0]
print ("VALORES DE xRES", xRes)
RtPar = np.concatenate([cRes, xRes])
RtPar = np.reshape(RtPar,(1,np.size(RtPar)))
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
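# A minimal usage sketch for calibClayPhiRhoRes2, assuming synthetic logs.
# All values below are hypothetical and only illustrate the expected shapes:
#
#   n = 200
#   vsh = np.random.uniform(0.0, 0.6, n)
#   phiE = np.random.uniform(0.05, 0.30, n)
#   phi = 0.02 + phiE + 0.35*vsh                  # synthetic neutron log
#   rho = 2.65 - 1.65*phiE - 0.15*vsh             # synthetic density log
#   Rt = np.random.uniform(1.0, 100.0, n)         # synthetic resistivity log
#   RtCoef = np.array([[1.0], [0.05], [0.1], [2.0]])  # (4,1): two fixed
#   # coefficients (cRes) followed by the initial guesses for the fit (x0)
#   phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp = calibClayPhiRhoRes2(
#       phi, rho, Rt, vsh, phiE, rhoWater=1.0, RtCoef=RtCoef, mode=2)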
def calibCPRRreMod(phiE, vsh, phiPar, rhoPar, RtPar, mode):
# PURPOSE: compute the modelled data using the models calibrated
# on another interval of the well, following the 3 distinct options for the effective porosity:
# 1 - use the neutron log itself as effective porosity (identity)
# 2 - use an independent effective-porosity log (e.g. core data)
#
# INPUT:
# phiE - effective-porosity log
# vsh - shale volume (normally extracted from the gamma-ray log)
# phiPar - calibrated parameters of the neutron-porosity model
# rhoPar - calibrated parameters of the density model
# RtPar - calibrated parameters of the resistivity model
# mode - effective-porosity selector: 1, 2 or 3 as in the cases
# described above.
#
# OUTPUT:
# phiComp - computed neutron-porosity log
# rhoComp - computed density log
# RtComp - computed resistivity log
#
#
# MODELS
# neutron porosity:
# phi = A + 1.0 phiE + C vsh
# density model:
# rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh
# resistivity model:
# Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
if (mode != 1 and mode != 2 and mode != 3):
raise Exception('Effective-porosity input selector must be 1, 2 or 3')
n = np.size(vsh)
if ((mode == 1 or mode == 2) and np.size(phiE) != n):
raise Exception('Effective-porosity input vector does not have the proper dimension')
if (mode == 1):
# the neutron log itself provides the effective porosity, following
# the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode ==2):
# in this case phiE is an effective-porosity vector
col1 = 1 - (phiE + vsh)
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode == 3):
phiSand = 0.25
# in this case a constant sand porosity (phiSand) is used instead of a log
col1 = 1 - (phiSand + vsh)
# mode 3 relies on the calibrated phiPar passed in as an argument
# (check the use of this mode 3; it may be better to cut it out of the if above)
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1 - phiE)
B = np.concatenate([[col1], [col2]])
rhoCte = rhoPar[1]*phiE
rhoComp = col1 * rhoPar[0] + col2*rhoPar[2] + rhoCte
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiComp, rhoComp, RtComp
def fitNorm1(A, d, maxIt):
xLS = np.linalg.lstsq(A, d)
#
# Test for the operator class
#
import pybamm
import numpy as np
import unittest
def get_mesh_for_testing(
xpts=None, rpts=10, ypts=15, zpts=15, geometry=None, cc_submesh=None, order=2
):
param = pybamm.ParameterValues(
values={
"Electrode width [m]": 0.4,
"Electrode height [m]": 0.5,
"Negative tab width [m]": 0.1,
"Negative tab centre y-coordinate [m]": 0.1,
"Negative tab centre z-coordinate [m]": 0.0,
"Positive tab width [m]": 0.1,
"Positive tab centre y-coordinate [m]": 0.3,
"Positive tab centre z-coordinate [m]": 0.5,
"Negative electrode thickness [m]": 0.3,
"Separator thickness [m]": 0.3,
"Positive electrode thickness [m]": 0.3,
}
)
if geometry is None:
geometry = pybamm.battery_geometry()
param.process_geometry(geometry)
submesh_types = {
"negative electrode": pybamm.MeshGenerator(
pybamm.SpectralVolume1DSubMesh, {"order": order}
),
"separator": pybamm.MeshGenerator(
pybamm.SpectralVolume1DSubMesh, {"order": order}
),
"positive electrode": pybamm.MeshGenerator(
pybamm.SpectralVolume1DSubMesh, {"order": order}
),
"negative particle": pybamm.MeshGenerator(
pybamm.SpectralVolume1DSubMesh, {"order": order}
),
"positive particle": pybamm.MeshGenerator(
pybamm.SpectralVolume1DSubMesh, {"order": order}
),
"current collector": pybamm.SubMesh0D,
}
if cc_submesh:
submesh_types["current collector"] = cc_submesh
if xpts is None:
xn_pts, xs_pts, xp_pts = 40, 25, 35
else:
xn_pts, xs_pts, xp_pts = xpts, xpts, xpts
var_pts = {
"x_n": xn_pts,
"x_s": xs_pts,
"x_p": xp_pts,
"r_n": rpts,
"r_p": rpts,
"y": ypts,
"z": zpts,
}
return pybamm.Mesh(geometry, submesh_types, var_pts)
def get_p2d_mesh_for_testing(xpts=None, rpts=10):
geometry = pybamm.battery_geometry()
return get_mesh_for_testing(xpts=xpts, rpts=rpts, geometry=geometry)
class TestSpectralVolumeConvergence(unittest.TestCase):
def test_grad_div_broadcast(self):
# create mesh and discretisation
spatial_methods = {"macroscale": pybamm.SpectralVolume()}
mesh = get_mesh_for_testing()
disc = pybamm.Discretisation(mesh, spatial_methods)
a = pybamm.PrimaryBroadcast(1, "negative electrode")
grad_a = disc.process_symbol(pybamm.grad(a))
np.testing.assert_array_equal(grad_a.evaluate(), 0)
a_edge = pybamm.PrimaryBroadcastToEdges(1, "negative electrode")
div_a = disc.process_symbol(pybamm.div(a_edge))
np.testing.assert_array_equal(div_a.evaluate(), 0)
div_grad_a = disc.process_symbol(pybamm.div(pybamm.grad(a)))
np.testing.assert_array_equal(div_grad_a.evaluate(), 0)
def test_cartesian_spherical_grad_convergence(self):
# note that grad function is the same for cartesian and spherical
order = 2
spatial_methods = {"macroscale": pybamm.SpectralVolume(order=order)}
whole_cell = ["negative electrode", "separator", "positive electrode"]
# Define variable
var = pybamm.Variable("var", domain=whole_cell)
grad_eqn = pybamm.grad(var)
boundary_conditions = {
var.id: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(np.sin(1) ** 2), "Dirichlet"),
}
}
# Function for convergence testing
def get_error(n):
# create mesh and discretisation
mesh = get_mesh_for_testing(n, order=order)
disc = pybamm.Discretisation(mesh, spatial_methods)
disc.bcs = boundary_conditions
disc.set_variable_slices([var])
# Define exact solutions
combined_submesh = mesh.combine_submeshes(*whole_cell)
x = combined_submesh.nodes
y = np.sin(x) ** 2
# var = sin(x)**2 --> dvardx = 2*sin(x)*cos(x)
x_edge = combined_submesh.edges
grad_exact = 2 * np.sin(x_edge) * np.cos(x_edge)
# Discretise and evaluate
grad_eqn_disc = disc.process_symbol(grad_eqn)
grad_approx = grad_eqn_disc.evaluate(y=y)
# Return difference between approx and exact
return grad_approx[:, 0] - grad_exact
# Get errors
ns = 100 * 2 ** np.arange(5)
errs = {n: get_error(int(n)) for n in ns}
# expect linear convergence at internal points
# (the higher-order convergence is in the integral means,
# not in the edge values)
errs_internal = np.array([np.linalg.norm(errs[n][1:-1], np.inf) for n in ns])
rates = np.log2(errs_internal[:-1] / errs_internal[1:])
np.testing.assert_array_less(0.99 * np.ones_like(rates), rates)
# expect linear convergence at the boundaries
for idx in [0, -1]:
err_boundary = np.array([errs[n][idx] for n in ns])
rates = np.log2(err_boundary[:-1] / err_boundary[1:])
np.testing.assert_array_less(0.98 * np.ones_like(rates), rates)
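# (Convergence-rate logic used throughout these tests: if err(n) ~ C * n**(-p),
# then log2(err(n) / err(2n)) -> p as the mesh is refined, so asserting
# rates > 0.99 checks for at least first-order convergence.)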
def test_cartesian_div_convergence(self):
whole_cell = ["negative electrode", "separator", "positive electrode"]
spatial_methods = {"macroscale": pybamm.SpectralVolume()}
# Function for convergence testing
def get_error(n):
# create mesh and discretisation
mesh = get_mesh_for_testing(n)
disc = pybamm.Discretisation(mesh, spatial_methods)
combined_submesh = mesh.combine_submeshes(*whole_cell)
x = combined_submesh.nodes
x_edge = pybamm.standard_spatial_vars.x_edge
# Define flux and bcs
N = x_edge ** 2 * pybamm.cos(x_edge)
div_eqn = pybamm.div(N)
# Define exact solutions
# N = x**2 * cos(x) --> dNdx = x*(2cos(x) - xsin(x))
div_exact = x * (2 * np.cos(x) - x * np.sin(x))
# Discretise and evaluate
div_eqn_disc = disc.process_symbol(div_eqn)
div_approx = div_eqn_disc.evaluate()
# Return difference between approx and exact
return div_approx[:, 0] - div_exact
# Get errors
ns = 10 * 2 ** np.arange(6)
errs = {n: get_error(int(n)) for n in ns}
# expect quadratic convergence everywhere
err_norm = np.array([np.linalg.norm(errs[n], np.inf) for n in ns])
rates = np.log2(err_norm[:-1] / err_norm[1:])
np.testing.assert_array_less(1.99 * np.ones_like(rates), rates)
def test_spherical_div_convergence_quadratic(self):
# test div(N) in spherical coords for N = sin(r): (1/r**2) d/dr(r**2 sin(r)) = 2/r*sin(r) + cos(r)
spatial_methods = {"negative particle": pybamm.SpectralVolume()}
# Function for convergence testing
def get_error(n):
# create mesh and discretisation (single particle)
mesh = get_mesh_for_testing(rpts=n)
disc = pybamm.Discretisation(mesh, spatial_methods)
submesh = mesh["negative particle"]
r = submesh.nodes
r_edge = pybamm.SpatialVariableEdge("r_n", domain=["negative particle"])
# Define flux and bcs
N = pybamm.sin(r_edge)
div_eqn = pybamm.div(N)
# Define exact solutions
# N = sin(r) --> div(N) = 2/r * sin(r) + cos(r)
div_exact = 2 / r * np.sin(r) + np.cos(r)
# Discretise and evaluate
div_eqn_disc = disc.process_symbol(div_eqn)
div_approx = div_eqn_disc.evaluate()
# Return difference between approx and exact
return div_approx[:, 0] - div_exact
# Get errors
ns = 10 * 2 ** np.arange(6)
errs = {n: get_error(int(n)) for n in ns}
# expect quadratic convergence everywhere
err_norm = np.array([np.linalg.norm(errs[n], np.inf) for n in ns])
rates = np.log2(err_norm[:-1] / err_norm[1:])
from . import DATA_DIR
import sys
import glob
from .background_systems import BackgroundSystemModel
from .export import ExportInventory
from inspect import currentframe, getframeinfo
from pathlib import Path
from scipy import sparse
import csv
import itertools
import numexpr as ne
import numpy as np
import xarray as xr
REMIND_FILES_DIR = DATA_DIR / "IAM"
class InventoryCalculation:
"""
Build and solve the inventory for results characterization and inventory export
Vehicles to be analyzed can be filtered by passing a `scope` dictionary.
Some assumptions in the background system can also be adjusted by passing a `background_configuration` dictionary.
.. code-block:: python
scope = {
'powertrain':['BEV', 'FCEV', 'ICEV-p'],
}
background_configuration = {
'country' : 'DE', # will use the network electricity losses of Germany
'custom electricity mix' : [[1,0,0,0,0,0,0,0,0,0], # in this case, 100% hydropower for the first year
[0.5,0.5,0,0,0,0,0,0,0,0]], # in this case, 50% hydro, 50% nuclear for the second year
'hydrogen technology' : 'Electrolysis',
'petrol technology': 'bioethanol - wheat straw',
'alternative petrol share':[0.1,0.2],
'battery technology': 'LFP',
'battery origin': 'NO'
}
InventoryCalculation(CarModel.array,
background_configuration=background_configuration,
scope=scope,
scenario="RCP26")
The `custom electricity mix` key in the background_configuration dictionary defines an electricity mix to apply,
in the form of one or several arrays (one per year to analyze),
each of which should total 1, where the indices correspond to:
- [0]: hydro-power
- [1]: nuclear
- [2]: natural gas
- [3]: solar power
- [4]: wind power
- [5]: biomass
- [6]: coal
- [7]: oil
- [8]: geothermal
- [9]: waste incineration
If none is given, the electricity mix corresponding to the country specified in `country` will be selected.
If no country is specified, Europe applies.
The `alternative petrol share` key contains an array with shares of alternative petrol fuel for each year, to create a custom blend.
If none is provided, a blend provided by the Integrated Assessment model REMIND is used, which will depend on the REMIND energy scenario selected.
:ivar array: array from the CarModel class
:vartype array: CarModel.array
:ivar scope: dictionary that contains filters for narrowing the analysis
:ivar background_configuration: dictionary that contains choices for background system
:ivar scenario: REMIND energy scenario to use ("BAU": business-as-usual, or "RCP26": limits radiative forcing to 2.6 W/m^2).
"SSP2-Base" is selected by default.
"""
def __init__(
self, array, scope=None, background_configuration=None, scenario="SSP2-Base"
):
if scope is None:
scope = {}
scope["size"] = array.coords["size"].values.tolist()
scope["powertrain"] = array.coords["powertrain"].values.tolist()
scope["year"] = array.coords["year"].values.tolist()
else:
scope["size"] = scope.get("size", array.coords["size"].values.tolist())
scope["powertrain"] = scope.get(
"powertrain", array.coords["powertrain"].values.tolist()
)
scope["year"] = scope.get("year", array.coords["year"].values.tolist())
self.scope = scope
self.scenario = scenario
if background_configuration is None:
self.background_configuration = {"country": "RER"}
else:
self.background_configuration = background_configuration
if "country" not in self.background_configuration:
self.background_configuration["country"] = "RER"
if "energy storage" not in self.background_configuration:
self.background_configuration["energy storage"] = {
"electric": {"type":"NMC",
"origin":"CN"}
}
else:
if "electric" not in self.background_configuration["energy storage"]:
self.background_configuration["energy storage"]["electric"] = {"type":"NMC",
"origin":"CN"}
else:
if "origin" not in self.background_configuration["energy storage"]["electric"]:
self.background_configuration["energy storage"]["electric"]["origin"] = "CN"
if "type" not in self.background_configuration["energy storage"]["electric"]:
self.background_configuration["energy storage"]["electric"]["type"] = "NMC"
array = array.sel(
powertrain=self.scope["powertrain"],
year=self.scope["year"],
size=self.scope["size"],
)
self.array = array.stack(desired=["size", "powertrain", "year"])
self.iterations = len(array.value.values)
self.number_of_cars = (
len(self.scope["size"])
* len(self.scope["powertrain"])
* len(self.scope["year"])
)
self.array_inputs = {
x: i for i, x in enumerate(list(self.array.parameter.values), 0)
}
self.array_powertrains = {
x: i for i, x in enumerate(list(self.array.powertrain.values), 0)
}
self.A = self.get_A_matrix()
self.inputs = self.get_dict_input()
self.add_additional_activities()
self.rev_inputs = self.get_rev_dict_input()
self.index_cng = [self.inputs[i] for i in self.inputs if "ICEV-g" in i[0]]
self.index_combustion_wo_cng = [
self.inputs[i]
for i in self.inputs
if any(
ele in i[0]
for ele in ["ICEV-p", "HEV-p", "PHEV-p", "ICEV-d", "PHEV-d", "HEV-d"]
)
]
self.index_diesel = [self.inputs[i] for i in self.inputs if "ICEV-d" in i[0]]
self.index_all_petrol = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["ICEV-p", "HEV-p", "PHEV-p"])
]
self.index_petrol = [self.inputs[i] for i in self.inputs if "ICEV-p" in i[0]]
self.index_hybrid = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["HEV-p", "HEV-d"])
]
self.index_plugin_hybrid = [
self.inputs[i] for i in self.inputs if "PHEV" in i[0]
]
self.index_fuel_cell = [self.inputs[i] for i in self.inputs if "FCEV" in i[0]]
self.index_emissions = [
self.inputs[i]
for i in self.inputs
if "air" in i[1][0]
and len(i[1]) > 1
and i[0]
not in [
"Carbon dioxide, fossil",
"Carbon monoxide, non-fossil",
"Methane, non-fossil",
"Particulates, > 10 um",
]
]
self.map_non_fuel_emissions = {
(
"Methane, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Methane direct emissions, suburban",
(
"Methane, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Methane direct emissions, rural",
(
"Lead",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Lead direct emissions, suburban",
(
"Ammonia",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Ammonia direct emissions, suburban",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "urban air close to ground"),
"kilogram",
): "NMVOC direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "urban air close to ground"),
"kilogram",
): "Hydrocarbons direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "low population density, long-term"),
"kilogram",
): "Dinitrogen oxide direct emissions, rural",
(
"Nitrogen oxides",
("air", "urban air close to ground"),
"kilogram",
): "Nitrogen oxides direct emissions, urban",
(
"Ammonia",
("air", "urban air close to ground"),
"kilogram",
): "Ammonia direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Particulate matters direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Carbon monoxide direct emissions, urban",
(
"Nitrogen oxides",
("air", "low population density, long-term"),
"kilogram",
): "Nitrogen oxides direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "non-urban air or from high stacks"),
"kilogram",
): "NMVOC direct emissions, suburban",
(
"Benzene",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Benzene direct emissions, suburban",
(
"Ammonia",
("air", "low population density, long-term"),
"kilogram",
): "Ammonia direct emissions, rural",
(
"Sulfur dioxide",
("air", "low population density, long-term"),
"kilogram",
): "Sulfur dioxide direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "low population density, long-term"),
"kilogram",
): "NMVOC direct emissions, rural",
(
"Particulates, < 2.5 um",
("air", "urban air close to ground"),
"kilogram",
): "Particulate matters direct emissions, urban",
(
"Sulfur dioxide",
("air", "urban air close to ground"),
"kilogram",
): "Sulfur dioxide direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Dinitrogen oxide direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Carbon monoxide direct emissions, rural",
(
"Methane, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Methane direct emissions, urban",
(
"Carbon monoxide, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Carbon monoxide direct emissions, suburban",
(
"Lead",
("air", "urban air close to ground"),
"kilogram",
): "Lead direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "low population density, long-term"),
"kilogram",
): "Particulate matters direct emissions, rural",
(
"Sulfur dioxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Sulfur dioxide direct emissions, suburban",
(
"Benzene",
("air", "low population density, long-term"),
"kilogram",
): "Benzene direct emissions, rural",
(
"Nitrogen oxides",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Nitrogen oxides direct emissions, suburban",
(
"Lead",
("air", "low population density, long-term"),
"kilogram",
): "Lead direct emissions, rural",
(
"Benzene",
("air", "urban air close to ground"),
"kilogram",
): "Benzene direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "low population density, long-term"),
"kilogram",
): "Hydrocarbons direct emissions, rural",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Hydrocarbons direct emissions, suburban",
(
"Dinitrogen monoxide",
("air", "urban air close to ground"),
"kilogram",
): "Dinitrogen oxide direct emissions, urban",
}
self.index_noise = [self.inputs[i] for i in self.inputs if "noise" in i[0]]
self.list_cat, self.split_indices = self.get_split_indices()
self.bs = BackgroundSystemModel()
def __getitem__(self, key):
"""
Make class['foo'] automatically filter for the parameter 'foo'
Makes the model code much cleaner
:param key: Parameter name
:type key: str
:return: `array` filtered after the parameter selected
"""
return self.temp_array.sel(parameter=key)
def get_results_table(self, method, level, split, sensitivity=False):
"""
Format an xarray.DataArray array to receive the results.
:param method: impact assessment method. Only "ReCiPe" method available at the moment.
:param level: "midpoint" or "endpoint" impact assessment level. Only "midpoint" available at the moment.
:param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.
:return: xarrray.DataArray
"""
if split == "components":
cat = [
"direct",
"energy chain",
"maintenance",
"glider",
"EoL",
"powertrain",
"energy storage",
"road",
]
dict_impact_cat = self.get_dict_impact_categories()
if sensitivity == False:
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
len(cat),
self.iterations,
)
),
coords=[
dict_impact_cat[method][level],
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
cat,
np.arange(0, self.iterations),
import numpy as np
import h5py as h5
import ctypes as ct
import os
from scipy import fft, ifft
from scipy.interpolate import interp1d
from control import forced_response, TransferFunction
import sharpy.utils.cout_utils as cout
import sharpy.utils.generator_interface as generator_interface
import sharpy.utils.settings as settings
import sharpy.utils.solver_interface as solver_interface
from sharpy.utils.constants import deg2rad
import sharpy.utils.h5utils as h5utils
import sharpy.utils.algebra as algebra
def compute_xf_zf(hf, vf, l, w, EA, cb):
"""
Fairlead location (xf, zf) computation
"""
root1, root2, ln1, ln2, lb = rename_terms(vf, hf, w, l)
# Define if there is part of the mooring line on the bed
if lb <= 0:
nobed = True
else:
nobed = False
# Compute the position of the fairlead
if nobed:
xf = hf/w*(ln1 - ln2) + hf*l/EA
zf = hf/w*(root1 - root2) + 1./EA*(vf*l-w*l**2/2)
else:
xf = lb + hf/w*ln1 + hf*l/EA
if not cb == 0.:
xf += cb*w/2/EA*(-lb**2 + (lb - hf/cb/w)*np.maximum((lb - hf/cb/w), 0))
zf = hf/w*(root1 - 1) + vf**2/2/EA/w
return xf, zf
def compute_jacobian(hf, vf, l, w, EA, cb):
"""
Analytical computation of the Jacobian of equations
in function compute_xf_zf
"""
root1, root2, ln1, ln2, lb = rename_terms(vf, hf, w, l)
# Compute their deivatives
der_root1_hf = 0.5*(1. + (vf/hf)**2)**(-0.5)*(2*vf/hf*(-vf/hf/hf))
der_root1_vf = 0.5*(1. + (vf/hf)**2)**(-0.5)*(2*vf/hf/hf)
der_root2_hf = 0.5*(1. + ((vf - w*l)/hf)**2)**(-0.5)*(2.*(vf - w*l)/hf*(-(vf - w*l)/hf/hf))
der_root2_vf = 0.5*(1. + ((vf - w*l)/hf)**2)**(-0.5)*(2.*(vf - w*l)/hf/hf)
der_ln1_hf = 1./(vf/hf + root1)*(vf/hf/hf + der_root1_hf)
der_ln1_vf = 1./(vf/hf + root1)*(1./hf + der_root1_vf)
der_ln2_hf = 1./((vf - w*l)/hf + root2)*(-(vf - w*l)/hf/hf + der_root2_hf)
der_ln2_vf = 1./((vf - w*l)/hf + root2)*(1./hf + der_root2_vf)
der_lb_hf = 0.
der_lb_vf = -1./w
# Define if there is part of the mooring line on the bed
if lb <= 0:
nobed = True
else:
nobed = False
# Compute the Jacobian
if nobed:
der_xf_hf = 1./w*(ln1 - ln2) + hf/w*(der_ln1_hf + der_ln2_hf) + l/EA
der_xf_vf = hf/w*(der_ln1_vf + der_ln2_vf)
der_zf_hf = 1./w*(root1 - root2) + hf/w*(der_root1_hf - der_root2_hf)
der_zf_vf = hf/w*(der_root1_vf - der_root2_vf) + 1./EA*l
else:
der_xf_hf = der_lb_hf + 1./w*ln1 + hf/w*der_ln1_hf + l/EA
if not cb == 0.:
arg1_max = l - vf/w - hf/cb/w
if arg1_max > 0.:
der_xf_hf += cb*w/2/EA*(2*(arg1_max)*(-1/cb/w))
der_xf_vf = der_lb_vf + hf/w*der_ln1_vf + cb*w/2/EA*(-2.*lb*der_lb_vf)
if not cb == 0.:
arg1_max = l - vf/w - hf/cb/w
if arg1_max > 0.:
der_xf_vf += cb*w/2/EA*(2.*(lb - hf/cb/w)*der_lb_vf)
der_zf_hf = 1/w*(root1 - 1) + hf/w*der_root1_hf
der_zf_vf = hf/w*der_root1_vf + vf/EA/w
J = np.array([[der_xf_hf, der_xf_vf],[der_zf_hf, der_zf_vf]])
return J
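# A quick finite-difference check of the analytical Jacobian above
# (a sketch; the line properties below are arbitrary, hypothetical values):
#
#   hf, vf, l, w, EA, cb = 1.0e5, 2.0e5, 850.0, 700.0, 1.0e9, 0.5
#   eps = 1.0
#   J = compute_jacobian(hf, vf, l, w, EA, cb)
#   xf1, zf1 = compute_xf_zf(hf + eps, vf, l, w, EA, cb)
#   xf0, zf0 = compute_xf_zf(hf - eps, vf, l, w, EA, cb)
#   # numerical d(xf)/d(hf) and d(zf)/d(hf) should match J[0, 0] and J[1, 0]
#   print((xf1 - xf0)/(2*eps), J[0, 0])
#   print((zf1 - zf0)/(2*eps), J[1, 0])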
def rename_terms(vf, hf, w, l):
"""
Rename some terms for convenience
"""
root1 = np.sqrt(1. + (vf/hf)**2)
root2 = np.sqrt(1. + ((vf - w*l)/hf)**2)
ln1 = np.log(vf/hf + root1)
ln2 = np.log((vf - w*l)/hf + root2)
lb = l - vf/w
return root1, root2, ln1, ln2, lb
def quasisteady_mooring(xf, zf, l, w, EA, cb, hf0=None, vf0=None):
"""
Computation of the forces generated by the mooring system
It performs a Newton-Raphson iteration based on the known equations
in the compute_xf_zf function and their analytical Jacobian
"""
# Initialise guess for hf0 and vf0
if xf == 0:
lambda0 = 1e6
elif np.sqrt(xf**2 + zf**2) > l:
lambda0 = 0.2
else:
lambda0 = np.sqrt(3*((l**2 - zf**2)/xf**2 - 1))
if hf0 is None:
hf0 = np.abs(w*xf/2/lambda0)
if vf0 is None:
vf0 = w/2*(zf/np.tanh(lambda0) + l)
# Compute the solution through Newton-Raphson iteration
hf_est = hf0 + 0.
vf_est = vf0 + 0.
xf_est, zf_est = compute_xf_zf(hf_est, vf_est, l, w, EA, cb)
# print("initial: ", xf_est, zf_est)
tol = 1e-6
error = 2*tol
max_iter = 10000
it = 0
while ((error > tol) and (it < max_iter)):
J_est = compute_jacobian(hf_est, vf_est, l, w, EA, cb)
inv_J_est = np.linalg.inv(J_est)
hf_est += inv_J_est[0, 0]*(xf - xf_est) + inv_J_est[0, 1]*(zf - zf_est)
vf_est += inv_J_est[1, 0]*(xf - xf_est) + inv_J_est[1, 1]*(zf - zf_est)
# hf += (xf - xf_est)/J[0, 0] + (zf - zf_est)/J[1, 0]
# vf += (xf - xf_est)/J[0, 1] + (zf - zf_est)/J[1, 1]
xf_est, zf_est = compute_xf_zf(hf_est, vf_est, l, w, EA, cb)
error = np.maximum(np.abs(xf - xf_est), np.abs(zf - zf_est))
# print(error)
it += 1
if ((it == max_iter) and (error > tol)):
cout.cout_wrap(("Mooring system did not converge. error %f" % error), 4)
print("Mooring system did not converge. error %f" % error)
return hf_est, vf_est
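# Round-trip sanity check for the Newton-Raphson solver above: start from
# known fairlead forces, compute the fairlead position, then recover the
# forces (a sketch; the numbers are hypothetical):
#
#   hf0, vf0 = 1.0e5, 2.0e5
#   l, w, EA, cb = 850.0, 700.0, 1.0e9, 0.5
#   xf, zf = compute_xf_zf(hf0, vf0, l, w, EA, cb)
#   hf, vf = quasisteady_mooring(xf, zf, l, w, EA, cb)
#   # hf, vf should be close to hf0, vf0 up to the solver tolerance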
def wave_radiation_damping(K, qdot, it, dt):
"""
This function computes the wave radiation damping assuming K constant
"""
qdot_int = np.zeros((6,))
for idof in range(6):
# np.trapz integrates y over x, i.e. trapz(y, x)
qdot_int[idof] = np.trapz(qdot[0:it + 1, idof], np.arange(0, it + 1, 1)*dt)
return np.dot(K, qdot_int)
def change_of_to_sharpy(matrix_of):
"""
Change between frame of reference of OpenFAST and the
usual one in SHARPy
"""
sub_mat = np.array([[0., 0, 1],
[0., -1, 0],
[1., 0, 0]])
C_of_s = np.zeros((6,6))
C_of_s[0:3, 0:3] = sub_mat
C_of_s[3:6, 3:6] = sub_mat
matrix_sharpy = np.dot(C_of_s.T, np.dot(matrix_of, C_of_s))
return matrix_sharpy
# def interp_1st_dim_matrix(A, vec, value):
#
# # Make sure vec is ordered in strictly ascending order
# if (np.diff(vec) <= 0).any():
# cout.cout_wrap("ERROR: vec should be in strictly increasing order", 4)
# if not A.shape[0] == vec.shape[0]:
# cout.cout_wrap("ERROR: Incoherent vector and matrix size", 4)
#
# # Compute the positions to interpolate
# if value <= vec[0]:
# return A[0, ...]
# elif ((value >= vec[-1]) or (value > vec[-2] and np.isinf(vec[-1]))):
# return A[-1, ...]
# else:
# i = 0
# while value > vec[i]:
# i += 1
# dist = vec[i] - vec[i - 1]
# rel_dist_to_im1 = (value - vec[i - 1])/dist
# rel_dist_to_i = (vec[i] - value)/dist
#
# return A[i - 1, ...]*rel_dist_to_i + A[i, ...]*rel_dist_to_im1
def rfval(num, den, z):
"""
Evaluate a rational function given by the coefficients of the numerator (num) and
denominator (den) at z
"""
return np.polyval(num, z)/np.polyval(den, z)
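# Example: evaluating the rational function H(w) = (w + 1)/(w**2 + 2w + 3)
# at w = 1 with the helper above (coefficients in np.polyval order,
# highest degree first):
#
#   num = [1.0, 1.0]        # w + 1
#   den = [1.0, 2.0, 3.0]   # w**2 + 2w + 3
#   rfval(num, den, 1.0)    # -> 2/6 = 0.333...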
def matrix_from_rf(dict_rf, w):
"""
Create a matrix from the rational function approximation of each one of the elements
"""
H = np.zeros((6, 6))
for i in range(6):
for j in range(6):
pos = "%d_%d" % (i, j)
H[i, j] = rfval(dict_rf[pos]['num'], dict_rf[pos]['den'], w)
return H
def response_freq_dep_matrix(H, omega_H, q, it_, dt):
"""
Compute the frequency response of a system with a transfer function depending on the frequency
F(t) = H(omega) * q(t)
"""
it = it_ + 1
omega_fft = np.linspace(0, 1/(2*dt), it//2)[:it//2]
fourier_q = fft(q[:it, :], axis=0)
fourier_f = np.zeros_like(fourier_q)
import numpy as np
np.random.seed(1234)
from time import time
import os
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
from model import WSTC, f1
from keras.optimizers import SGD
from gen import augment, pseudodocs
from load_data import load_dataset
from gensim.models import word2vec
def train_word2vec(sentence_matrix, vocabulary_inv, dataset_name, mode='skipgram',
num_features=100, min_word_count=5, context=5):
model_dir = './' + dataset_name
model_name = "embedding"
model_name = os.path.join(model_dir, model_name)
if os.path.exists(model_name):
embedding_model = word2vec.Word2Vec.load(model_name)
print("Loading existing Word2Vec model {}...".format(model_name))
else:
num_workers = 15 # Number of threads to run in parallel
downsampling = 1e-3 # Downsample setting for frequent words
print('Training Word2Vec model...')
sentences = [[vocabulary_inv[w] for w in s] for s in sentence_matrix]
if mode == 'skipgram':
sg = 1
print('Model: skip-gram')
elif mode == 'cbow':
sg = 0
print('Model: CBOW')
embedding_model = word2vec.Word2Vec(sentences, workers=num_workers, sg=sg,
size=num_features, min_count=min_word_count,
window=context, sample=downsampling)
embedding_model.init_sims(replace=True)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
print("Saving Word2Vec model {}".format(model_name))
embedding_model.save(model_name)
embedding_weights = {key: embedding_model[word] if word in embedding_model else
np.random.uniform(-0.25, 0.25, embedding_model.vector_size)
for key, word in vocabulary_inv.items()}
return embedding_weights
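# Minimal usage sketch (hypothetical data): `x` is a matrix of word indices
# and `vocabulary_inv` maps index -> word, as produced by load_dataset:
#
#   weights = train_word2vec(x, vocabulary_inv, 'agnews', mode='skipgram')
#   vec = weights[0]   # 100-d vector for the word vocabulary_inv[0]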
def write_output(write_path, y_pred, perm):
invperm = np.zeros(len(perm), dtype='int32')
for i,v in enumerate(perm):
invperm[v] = i
y_pred = y_pred[invperm]
with open(os.path.join(write_path, 'out.txt'), 'w') as f:
for val in y_pred:
f.write(str(val) + '\n')
print("Classification results are written in {}".format(os.path.join(write_path, 'out.txt')))
return
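# Note: assuming `perm` is a permutation of range(len(perm)), the inverse
# permutation built in the loop above is equivalent to the one-liner:
#
#   invperm = np.argsort(perm)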
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='main',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
### Basic settings ###
# dataset selection: 20 Newsgroups (default), AG's News, Yelp Review, NYT or arXiv
parser.add_argument('--dataset', default='20news', choices=['agnews', 'yelp', 'nyt', 'arxiv', '20news'])
# neural model selection: Convolutional Neural Network (default) and Hierarchical Attention Network
parser.add_argument('--model', default='cnn', choices=['cnn', 'rnn'])
# weak supervision selection: label surface names, class-related keywords (default) and labeled documents
parser.add_argument('--sup_source', default='keywords', choices=['labels', 'keywords', 'docs'])
# whether ground truth labels are available for evaluation: True (default), False
parser.add_argument('--with_evaluation', default='True', choices=['True', 'False'])
### Training settings ###
# mini-batch size for both pre-training and self-training: 256 (default)
parser.add_argument('--batch_size', default=256, type=int)
# maximum self-training iterations: 5000 (default)
parser.add_argument('--maxiter', default=5e3, type=int)
# pre-training epochs: None (default)
parser.add_argument('--pretrain_epochs', default=None, type=int)
# self-training update interval: None (default)
parser.add_argument('--update_interval', default=None, type=int)
### Hyperparameters settings ###
# background word distribution weight (alpha): 0.2 (default)
parser.add_argument('--alpha', default=0.2, type=float)
# number of generated pseudo documents per class (beta): 500 (default)
parser.add_argument('--beta', default=500, type=int)
# keyword vocabulary size (gamma): 50 (default)
parser.add_argument('--gamma', default=50, type=int)
# self-training stopping criterion (delta): 0.1 (default)
parser.add_argument('--delta', default=0.1, type=float)
### Case study settings ###
# trained model directory: None (default)
parser.add_argument('--trained_weights', default=None)
args = parser.parse_args()
print(args)
alpha = args.alpha
beta = args.beta
gamma = args.gamma
delta = args.delta
word_embedding_dim = 100
if args.model == 'cnn':
if args.dataset == 'agnews':
update_interval = 50
pretrain_epochs = 20
self_lr = 1e-3
max_sequence_length = 100
else:
update_interval = 50
pretrain_epochs = 30
self_lr = 1e-4
max_sequence_length = 500
decay = 1e-6
elif args.model == 'rnn':
if args.dataset == 'agnews':
update_interval = 50
pretrain_epochs = 100
self_lr = 1e-3
sent_len = 45
doc_len = 10
elif args.dataset == 'yelp':
update_interval = 100
pretrain_epochs = 200
self_lr = 1e-4
sent_len = 30
doc_len = 40
decay = 1e-5
max_sequence_length = [doc_len, sent_len]
if args.update_interval is not None:
update_interval = args.update_interval
if args.pretrain_epochs is not None:
pretrain_epochs = args.pretrain_epochs
if args.with_evaluation == 'True':
with_evaluation = True
else:
with_evaluation = False
if args.sup_source == 'labels' or args.sup_source == 'keywords':
x, y, word_counts, vocabulary, vocabulary_inv_list, len_avg, len_std, word_sup_list, perm = \
load_dataset(args.dataset, model=args.model, sup_source=args.sup_source, with_evaluation=with_evaluation, truncate_len=max_sequence_length)
sup_idx = None
elif args.sup_source == 'docs':
x, y, word_counts, vocabulary, vocabulary_inv_list, len_avg, len_std, word_sup_list, sup_idx, perm = \
load_dataset(args.dataset, model=args.model, sup_source=args.sup_source, with_evaluation=with_evaluation, truncate_len=max_sequence_length)
np.random.seed(1234)
vocabulary_inv = {key: value for key, value in enumerate(vocabulary_inv_list)}
vocab_sz = len(vocabulary_inv)
n_classes = len(word_sup_list)
if args.model == 'cnn':
if x.shape[1] < max_sequence_length:
max_sequence_length = x.shape[1]
x = x[:, :max_sequence_length]
sequence_length = max_sequence_length
elif args.model == 'rnn':
if x.shape[1] < doc_len:
doc_len = x.shape[1]
if x.shape[2] < sent_len:
sent_len = x.shape[2]
x = x[:, :doc_len, :sent_len]
sequence_length = [doc_len, sent_len]
print("\n### Input preparation ###")
embedding_weights = train_word2vec(x, vocabulary_inv, args.dataset)
embedding_mat = np.array([np.array(embedding_weights[word]) for word in vocabulary_inv])
wstc = WSTC(input_shape=x.shape, n_classes=n_classes, y=y, model=args.model,
vocab_sz=vocab_sz, embedding_matrix=embedding_mat, word_embedding_dim=word_embedding_dim)
if args.trained_weights is None:
print("\n### Phase 1: vMF distribution fitting & pseudo document generation ###")
word_sup_array = np.array([np.array([vocabulary[word] for word in word_class_list]) for word_class_list in word_sup_list])
total_counts = sum(word_counts[ele] for ele in word_counts)
total_counts -= word_counts[vocabulary_inv_list[0]]
background_array = np.zeros(vocab_sz)
for i in range(1,vocab_sz):
background_array[i] = word_counts[vocabulary_inv[i]]/total_counts
seed_docs, seed_label = pseudodocs(word_sup_array, gamma, background_array,
sequence_length, len_avg, len_std, beta, alpha,
vocabulary_inv, embedding_mat, args.model,
'./results/{}/{}/phase1/'.format(args.dataset, args.model))
if args.sup_source == 'docs':
if args.model == 'cnn':
num_real_doc = len(sup_idx.flatten()) * 10
elif args.model == 'rnn':
num_real_doc = len(sup_idx.flatten())
real_seed_docs, real_seed_label = augment(x, sup_idx, num_real_doc)
seed_docs = np.concatenate((seed_docs, real_seed_docs), axis=0)
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import math
import random
import warnings
import numpy as np
import tensorflow as tf
try:
tf.train.AdamOptimizer
except AttributeError:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import sklearn.metrics
#TODO Clean this
# Animesh commented this line out: from gcn.gcn_datasets import GCNDataset
# from gcn.gcn_datasets import GCNDataset
try:
from . import gcn_datasets
except ImportError:
import gcn_datasets
from common.trace import traceln
def init_glorot(shape, name=None):
"""Glorot & Bengio (AISTATS 2010) init."""
init_range = np.sqrt(6.0/(shape[0]+shape[1]))
initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name)
def init_normal(shape,stddev,name=None):
initial=tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=np.float32)
return tf.Variable(initial, name=name)
class MultiGraphNN(object):
'''
Abstract Class for a Neural Net learned on a graph list
'''
def train_lG(self,session,gcn_graph_train):
'''
Train an a list of graph
:param session:
:param gcn_graph_train:
:return:
'''
for g in gcn_graph_train:
self.train(session, g, n_iter=1)
def test_lG(self, session, gcn_graph_test, verbose=True):
'''
Test on a list of Graph
:param session:
:param gcn_graph_test:
:return:
'''
acc_tp = np.float64(0.0)
nb_node_total = np.float64(0.0)
mean_acc_test = []
for g in gcn_graph_test:
acc = self.test(session, g, verbose=False)
mean_acc_test.append(acc)
nb_node_total += g.X.shape[0]
acc_tp += acc * g.X.shape[0]
g_acc = np.mean(mean_acc_test)
node_acc = acc_tp / nb_node_total
if verbose:
traceln('\t -- Mean Graph Accuracy', '%.4f' % g_acc)
traceln('\t -- Mean Node Accuracy', '%.4f' % node_acc)
return g_acc,node_acc
def predict_lG(self,session,gcn_graph_predict,verbose=True):
'''
Predict for a list of graph
:param session:
:param gcn_graph_test:
:return:
'''
lY_pred=[]
for g in gcn_graph_predict:
gY_pred = self.predict(session, g, verbose=verbose)
lY_pred.append(gY_pred)
return lY_pred
def predict_prob_lG(self, session, l_gcn_graph, verbose=True):
'''
Predict Probabilities for a list of graph
:param session:
:param l_gcn_graph:
:return a list of predictions
'''
lY_pred = []
for g in l_gcn_graph:
gY_pred = self.prediction_prob(session, g, verbose=verbose)
lY_pred.append(gY_pred)
return lY_pred
def get_nb_params(self):
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
#traceln(shape)
#traceln(len(shape))
variable_parameters = 1
for dim in shape:
#traceln(dim)
variable_parameters *= dim.value
#traceln(variable_parameters)
total_parameters += variable_parameters
return total_parameters
def train_with_validation_set(self,session,graph_train,graph_val,max_iter,eval_iter=10,patience=7,graph_test=None,save_model_path=None):
'''
Implements training with a validation set
The model is trained and accuracy is measure on a validation sets
In addition, the model can be save and one can perform early stopping thanks to the patience argument
:param session:
:param graph_train: the list of graph to train on
:param graph_val: the list of graph used for validation
:param max_iter: maximum number of epochs
:param eval_iter: evaluate every eval_iter
:param patience: stopped training if accuracy is not improved on the validation set after patience_value
:param graph_test: Optional. If a test set is provided, then accuracy on the test set is reported
:param save_model_path: checkpoints filename to save the model.
:return: A Dictionary with training accuracies, validations accuracies and test accuracies if any, and the Wedge parameters
'''
best_val_acc=0.0
wait=0
stop_training=False
stopped_iter=max_iter
train_accuracies=[]
validation_accuracies=[]
test_accuracies=[]
conf_mat=[]
start_monitoring_val_acc=False
for i in range(max_iter):
if stop_training:
break
if i % eval_iter == 0:
traceln('\n -- Epoch ', i,' Patience ', wait)
_, tr_acc = self.test_lG(session, graph_train, verbose=False)
traceln(' Train Acc ', '%.4f' % tr_acc)
train_accuracies.append(tr_acc)
_, node_acc = self.test_lG(session, graph_val, verbose=False)
traceln(' -- Valid Acc ', '%.4f' % node_acc)
validation_accuracies.append(node_acc)
if save_model_path:
save_path = self.saver.save(session, save_model_path, global_step=i)
if graph_test:
test_graph_acc,test_acc = self.test_lG(session, graph_test, verbose=False)
traceln(' -- Test Acc ', '%.4f' % test_acc,' %.4f' % test_graph_acc)
test_accuracies.append(test_acc)
if node_acc > best_val_acc:
best_val_acc = node_acc
wait = 0
else:
if wait >= patience:
stopped_iter = i
stop_training = True
wait += 1
else:
random.shuffle(graph_train)
for g in graph_train:
self.train(session, g, n_iter=1)
#Final Save
traceln(' -- Stopped Model Training after : ',stopped_iter)
traceln(' -- Validation Accuracies : ',['%.4f' % (100*sx) for sx in validation_accuracies])
#traceln('Final Training Accuracy')
_,node_train_acc = self.test_lG(session, graph_train)
traceln(' -- Final Training Accuracy','%.4f' % node_train_acc)
traceln(' -- Final Valid Acc')
self.test_lG(session, graph_val)
R = {}
R['train_acc'] = train_accuracies
R['val_acc'] = validation_accuracies
R['test_acc'] = test_accuracies
R['stopped_iter'] = stopped_iter
R['confusion_matrix'] = conf_mat
#R['W_edge'] =self.get_Wedge(session)
if graph_test:
_, final_test_acc = self.test_lG(session, graph_test)
traceln(' -- Final Test Acc','%.4f' % final_test_acc)
R['final_test_acc'] = final_test_acc
val = R['val_acc']
traceln(' -- Validation scores', val)
epoch_index = np.argmax(val)
traceln(' -- Best performance on val set: Epoch', epoch_index,val[epoch_index])
traceln(' -- Test Performance from val', test_accuracies[epoch_index])
return R
class EnsembleGraphNN(MultiGraphNN):
'''
An ensemble of Graph NN Models
Construction Outside of class
'''
def __init__(self, graph_nn_models):
self.models = graph_nn_models
def train_lG(self, session, gcn_graph_train):
'''
Train an a list of graph
:param session:
:param gcn_graph_train:
:return:
'''
for m in self.models:
m.train_lG(session, gcn_graph_train)
def test_lG(self, session, gcn_graph_test, verbose=True):
'''
Test on a list of Graph
:param session:
:param gcn_graph_test:
:return:
'''
acc_tp = np.float64(0.0)
nb_node_total = np.float64(0.0)
mean_acc_test = []
Y_pred=self.predict_lG(session,gcn_graph_test)
Y_true =[g.Y for g in gcn_graph_test]
Y_pred_node = np.vstack(Y_pred)
node_acc = sklearn.metrics.accuracy_score(np.argmax(np.vstack(Y_true),axis=1),np.argmax(Y_pred_node,axis=1))
g_acc = -1
#node_acc = acc_tp / nb_node_total
if verbose:
traceln(' -- Mean Graph Accuracy', '%.4f' % g_acc)
traceln(' -- Mean Node Accuracy', '%.4f' % node_acc)
return g_acc, node_acc
def predict_lG(self, session, gcn_graph_predict, verbose=True):
'''
Predict for a list of graph
:param session:
:param gcn_graph_test:
:return:
'''
lY_pred = []
#Seem Very Slow ... Here
#I should predict for all graph
nb_models = float(len(self.models))
for g in gcn_graph_predict:
#Average Proba Here
g_pred=[]
for m in self.models:
gY_pred = m.prediction_prob(session, g, verbose=verbose)
g_pred.append(gY_pred)
#traceln(gY_pred)
lY_pred.append(np.sum(g_pred,axis=0)/nb_models)
return lY_pred
def train_with_validation_set(self, session, graph_train, graph_val, max_iter, eval_iter=10, patience=7,
graph_test=None, save_model_path=None):
raise NotImplementedError
class Logit(MultiGraphNN):
'''
Logistic Regression for MultiGraph
'''
def __init__(self,node_dim,nb_classes,learning_rate=0.1,mu=0.1,node_indim=-1):
self.node_dim=node_dim
self.n_classes=nb_classes
self.learning_rate=learning_rate
self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.train_Wn0=True
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
def create_model(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
self.Wnode_layers=[]
self.Bnode_layers=[]
self.W_classif = tf.Variable(tf.random_uniform((self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
self.logits =tf.add(tf.matmul(self.node_input,self.W_classif),self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
# Global L2 Regulization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver= tf.train.Saver(max_to_keep=5)
traceln(' -- Number of Params: ', self.get_nb_params())
def save_model(self, session, model_filename):
traceln(" -- Saving Model")
save_path = self.saver.save(session, model_filename)
def restore_model(self, session, model_filename):
self.saver.restore(session, model_filename)
traceln(" -- Model restored.")
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln('Train',X.shape,EA.shape)
feed_batch = {
self.nb_node: graph.X.shape[0],
self.node_input: graph.X,
self.y_input: graph.Y,
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.node_input: graph.X,
self.y_input: graph.Y,
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.node_input: graph.X,
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
class EdgeConvNet(MultiGraphNN):
'''
Edge-GCN Model for a graph list
'''
#Variable ignored by the set_learning_options
_setter_variables={
"node_dim":True,"edge_dim":True,"nb_class":True,
"num_layers":True,"lr":True,"mu":True,
"node_indim":True,"nconv_edge":True,
"nb_iter":True,"ratio_train_val":True}
def __init__(self,node_dim,edge_dim,nb_classes,num_layers=1,learning_rate=0.1,mu=0.1,node_indim=-1,nconv_edge=1,
):
self.node_dim=node_dim
self.edge_dim=edge_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.tanh
#self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.nconv_edge=nconv_edge
self.residual_connection=False#deprecated
self.shared_We = False#deprecated
self.optim_mode=0 #deprecated
self.init_fixed=False #ignore --for test purpose
self.logit_convolve=False#ignore --for test purpose
self.train_Wn0=True #ignore --for test purpose
self.dropout_rate_edge_feat= 0.0
self.dropout_rate_edge = 0.0
self.dropout_rate_node = 0.0
self.dropout_rate_H = 0.0
self.use_conv_weighted_avg=False
self.use_edge_mlp=False
self.edge_mlp_dim = 5
self.sum_attention=False
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
def set_learning_options(self,dict_model_config):
"""
Set all learning options that not directly accessible from the constructor
:param kwargs:
:return:
"""
#traceln( -- dict_model_config)
for attrname,val in dict_model_config.items():
#We treat the activation function differently as we can not pickle/serialiaze python function
if attrname=='activation_name':
if val=='relu':
self.activation=tf.nn.relu
elif val=='tanh':
self.activation=tf.nn.tanh
else:
raise Exception('Invalid Activation Function')
if attrname=='stack_instead_add' or attrname=='stack_convolutions':
self.stack_instead_add=val
if attrname not in self._setter_variables:
try:
traceln(' -- set ',attrname,val)
setattr(self,attrname,val)
except AttributeError:
warnings.warn("Ignored options for ECN"+attrname+':'+val)
def fastconvolve(self,Wedge,Bedge,F,S,T,H,nconv,Sshape,nb_edge,dropout_p_edge,dropout_p_edge_feat,
stack=True, use_dropout=False,zwe=None,use_weighted_average=False,
use_edge_mlp=False,Wedge_mlp=None,Bedge_mlp=None,use_attention=False):
'''
:param Wedge: Parameter matrix for edge convolution, with shape (n_conv_edge, edge_dim)
:param F: The Edge Feature Matrix
:param S: the Source (Node,Edge) matrix in sparse format
:param T: the Target (Node,Edge) matrix in sparse format
:param H: The current node layer
:param nconv: The number of edge convolutions.
:param Sshape: The shape of matrices S and T
:param nb_edge: The number of edges
:param stack: whether to concat all the convolutions or add them
:return: a tensor P of shape (nb_node, nconv*node_dim) if stack else (nb_node, node_dim)
'''
# F is n_edge by nconv
# TODO if stack is False we could simply sum the convolutions and do S diag(sum) T
# It would be faster
# Drop convolutions individually
if use_dropout:
#if False:
conv_dropout_ind = tf.nn.dropout(tf.ones([nconv], dtype=tf.float32), 1 - dropout_p_edge_feat)
ND_conv = tf.diag(conv_dropout_ind)
FW_ = tf.matmul(F, Wedge, transpose_b=True) + Bedge
FW = tf.matmul(FW_,ND_conv)
elif use_edge_mlp:
#Wedge mlp is a shared variable across layer which project edge in a lower dim
FW0 = tf.nn.tanh( tf.matmul(F,Wedge_mlp) +Bedge_mlp )
traceln(' -- FW0', FW0.get_shape())
FW = tf.matmul(FW0, Wedge, transpose_b=True) + Bedge
traceln(' -- FW', FW.get_shape())
else:
FW = tf.matmul(F, Wedge, transpose_b=True) + Bedge
traceln(' -- FW', FW.get_shape())
self.conv =tf.unstack(FW,axis=1)
Cops=[]
alphas=[]
Tr = tf.SparseTensor(indices=T, values=tf.ones([nb_edge], dtype=tf.float32), dense_shape=[Sshape[1],Sshape[0]])
Tr = tf.sparse_reorder(Tr)
TP = tf.sparse_tensor_dense_matmul(Tr,H)
if use_attention:
attn_params = init_glorot([2, int(self.node_dim)])
for i, cw in enumerate(self.conv):
#SD= tf.SparseTensor(indices=S,values=cw,dense_shape=[nb_node,nb_edge])
#Warning, pay attention to the ordering of edges
if use_weighted_average:
cw = zwe[i]*cw
if use_dropout:
cwd = tf.nn.dropout(cw, 1.0 -dropout_p_edge)
SD = tf.SparseTensor(indices=S, values=cwd, dense_shape=Sshape)
else:
SD = tf.SparseTensor(indices=S, values=cw, dense_shape=Sshape)
SD =tf.sparse_reorder(SD)
#Does this dropout depends on the convolution ?
#if use_dropout:
# SD = tf.nn.dropout(SD, 1.0 - dropout_p_edge)
Hi =tf.sparse_tensor_dense_matmul(SD,TP)
Cops.append(Hi)
if use_attention:
attn_val = tf.reduce_sum(tf.multiply(attn_params[0], H) + tf.multiply(attn_params[1], Hi), axis=1)
alphas.append(attn_val)
if stack is True:
#If stack we concatenate all the different convolutions
P=tf.concat(Cops,1)
elif use_attention:
alphas_s = tf.stack(alphas, axis=1)
alphas_l = tf.nn.softmax(tf.nn.leaky_relu(alphas_s))
#Not Clean to use the dropout for the edge feat
#Is this dropout necessary Here ? could do without
alphas_do =tf.nn.dropout(alphas_l,1 - dropout_p_edge_feat)
wC = [tf.multiply(tf.expand_dims(tf.transpose(alphas_do)[i], 1), C) for i, C in enumerate(Cops)]
P = tf.add_n(wC)
else:
#Else we take the mean
P=1.0/(tf.cast(nconv,tf.float32))*tf.add_n(Cops)
#traceln('p_add_n',P.get_shape())
return P
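# Dense-NumPy sketch of what a single convolution channel above computes,
# assuming S (nb_node x nb_edge) and T (nb_edge x nb_node) are the dense
# incidence matrices and cw = (F @ Wedge.T + Bedge)[:, i] the per-edge
# weights of channel i:
#
#   Hi = (S * cw) @ (T @ H)    # cw broadcast along the edge axis
#
# i.e. node states H are gathered at edge targets (T @ H), re-weighted per
# edge by the learned feature map cw, and scattered back to edge sources.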
@staticmethod
def logitconvolve_fixed(pY,Yt,A_indegree):
'''
Tentative implementation of a fixed logit convolution, without taking edge features into account
'''
#warning we should test that Yt is column normalized
pY_Yt = tf.matmul(pY,Yt,transpose_b=True)
#TODO A is dense but shoudl be sparse ....
P =tf.matmul(A_indegree,pY_Yt)
return P
def create_model_stack_convolutions(self):
#Create All the Variables
for i in range(self.num_layers - 1):
if i == 0:
Wnli = tf.Variable(
tf.random_uniform((self.node_dim * self.nconv_edge + self.node_dim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
else:
Wnli = tf.Variable(
tf.random_uniform((self.node_indim * self.nconv_edge + self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
traceln(' -- Wnli shape', Wnli.get_shape())
Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl' + str(i), dtype=tf.float32)
Weli = init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel_')
# Weli = tf.Variable(tf.random_normal([int(self.nconv_edge), int(self.edge_dim)], mean=0.0, stddev=1.0),
# dtype=np.float32, name='Wel_')
Beli = tf.Variable(0.01 * tf.ones([self.nconv_edge]), name='Bel' + str(i), dtype=tf.float32)
self.Wnode_layers.append(Wnli)
self.Bnode_layers.append(Bnli)
self.Wed_layers.append(Weli)
self.Bed_layers.append(Beli)
self.train_var.extend((self.Wnode_layers))
self.train_var.extend((self.Wed_layers))
self.Hnode_layers = []
self.W_classif = tf.Variable(
tf.random_uniform((self.node_indim * self.nconv_edge + self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif", dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif', dtype=np.float32)
self.train_var.append((self.W_classif))
self.train_var.append((self.B_classif))
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node], dtype=tf.float32), 1 - self.dropout_p_node)
self.ND = tf.diag(self.node_dropout_ind)
edge_dropout = self.dropout_rate_edge > 0.0 or self.dropout_rate_edge_feat > 0.0
traceln(' -- Edge Dropout', edge_dropout, self.dropout_rate_edge, self.dropout_rate_edge_feat)
if self.num_layers == 1:
self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
self.hidden_layers = [self.H]
traceln(" -- H shape", self.H.get_shape())
P = self.fastconvolve(self.Wel0, self.Bel0, self.F, self.Ssparse, self.Tsparse, self.H, self.nconv_edge,
self.Sshape, self.nb_edge,
self.dropout_p_edge, self.dropout_p_edge_feat, stack=self.stack_instead_add,
use_dropout=edge_dropout)
Hp = tf.concat([self.H, P], 1)
# Hp= P+self.H
Hi = self.activation(Hp)
# Hi_shape = Hi.get_shape()
# traceln(Hi_shape)
self.hidden_layers.append(Hi)
elif self.num_layers > 1:
if self.dropout_rate_node > 0.0:
#H0 = self.activation(tf.matmul(self.ND, tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0)))
H0 = tf.matmul(self.ND, tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
else:
#H0 = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
H0 = tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0)
self.Hnode_layers.append(H0)
# TODO Default to fast convolve but we change update configs, train and test flags
P = self.fastconvolve(self.Wel0, self.Bel0, self.F, self.Ssparse, self.Tsparse, H0, self.nconv_edge,
self.Sshape, self.nb_edge,
self.dropout_p_edge, self.dropout_p_edge_feat, stack=self.stack_instead_add,
use_dropout=edge_dropout,
)
Hp = tf.concat([H0, P], 1)
# TODO add activation Here.
self.hidden_layers = [self.activation(Hp)]
#self.hidden_layers = [Hp]
for i in range(self.num_layers - 1):
if self.dropout_rate_H > 0.0:
Hi_ = tf.nn.dropout(tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i],
1 - self.dropout_p_H)
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i]
if self.residual_connection:
Hi_ = tf.add(Hi_, self.Hnode_layers[-1])
self.Hnode_layers.append(Hi_)
# traceln('Hi_shape', Hi_.get_shape())
#traceln('Hi prevous shape', self.hidden_layers[-1].get_shape())
P = self.fastconvolve(self.Wed_layers[i], self.Bed_layers[i], self.F, self.Ssparse, self.Tsparse, Hi_,
self.nconv_edge, self.Sshape, self.nb_edge,
self.dropout_p_edge, self.dropout_p_edge_feat, stack=self.stack_instead_add,
use_dropout=edge_dropout,
)
Hp = tf.concat([Hi_, P], 1)
Hi = self.activation(Hp)
self.hidden_layers.append(Hi)
def create_model_sum_convolutions(self):
#self.Wed_layers.append(Wel0)
for i in range(self.num_layers-1):
if i==0:
# Animesh's code
Wnli = tf.Variable(tf.random_uniform((2 * self.node_dim, self.node_indim),
#Wnli = tf.Variable(tf.random_uniform((2 * self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl',
dtype=tf.float32)
else:
Wnli =tf.Variable(tf.random_uniform( (2*self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)),name='Wnl',dtype=tf.float32)
Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl'+str(i),dtype=tf.float32)
Weli= init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel_')
#Weli = tf.Variable(tf.random_normal([int(self.nconv_edge), int(self.edge_dim)], mean=0.0, stddev=1.0),
# dtype=np.float32, name='Wel_')
Beli = tf.Variable(0.01*tf.ones([self.nconv_edge]), name='Bel'+str(i),dtype=tf.float32)
self.Wnode_layers.append(Wnli)
self.Bnode_layers.append(Bnli)
self.Wed_layers.append (Weli)
self.Bed_layers.append(Beli)
self.train_var.extend((self.Wnode_layers))
self.train_var.extend((self.Wed_layers))
self.Hnode_layers=[]
self.W_classif = tf.Variable(tf.random_uniform((2*self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
self.train_var.append((self.W_classif))
self.train_var.append((self.B_classif))
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node], dtype=tf.float32), 1 - self.dropout_p_node)
self.ND = tf.diag(self.node_dropout_ind)
edge_dropout = self.dropout_rate_edge> 0.0 or self.dropout_rate_edge_feat > 0.0
traceln(' -- Edge Dropout',edge_dropout, self.dropout_rate_edge,self.dropout_rate_edge_feat)
if self.num_layers==1:
self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
self.hidden_layers = [self.H]
traceln(" -- H shape",self.H.get_shape())
P = self.fastconvolve(self.Wel0,self.Bel0,self.F,self.Ssparse,self.Tsparse,self.H,self.nconv_edge,self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat,stack=self.stack_instead_add,use_dropout=edge_dropout,
use_attention=self.sum_attention
)
Hp = tf.concat([self.H, P], 1)
Hi=self.activation(Hp)
self.hidden_layers.append(Hi)
elif self.num_layers>1:
if self.dropout_rate_node>0.0:
H0 = self.activation(tf.matmul(self.ND,tf.add(tf.matmul(self.node_input,self.Wnl0), self.Bnl0)))
else:
H0 = self.activation(tf.add(tf.matmul(self.node_input,self.Wnl0),self.Bnl0))
self.Hnode_layers.append(H0)
#TODO Default to fast convolve but we change update configs, train and test flags
P = self.fastconvolve(self.Wel0,self.Bel0, self.F, self.Ssparse, self.Tsparse, H0, self.nconv_edge, self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout,
use_attention=self.sum_attention
)
if self.use_conv_weighted_avg:
Hp = self.zH[0] * H0 + P
else:
Hp = tf.concat([H0, P], 1)
#TODO add activation Here.
#self.hidden_layers = [self.activation(Hp)]
self.hidden_layers = [Hp]
for i in range(self.num_layers-1):
if self.dropout_rate_H > 0.0:
Hi_ = tf.nn.dropout(tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i], 1-self.dropout_p_H)
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i]
if self.residual_connection:
Hi_= tf.add(Hi_,self.Hnode_layers[-1])
self.Hnode_layers.append(Hi_)
#traceln('Hi_shape',Hi_.get_shape())
# traceln('Hi prevous shape',self.hidden_layers[-1].get_shape())
P = self.fastconvolve(self.Wed_layers[i],self.Bed_layers[i], self.F, self.Ssparse, self.Tsparse, Hi_, self.nconv_edge,self.Sshape, self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout
)
Hp = tf.concat([Hi_, P], 1)
Hi = self.activation(Hp)
self.hidden_layers.append(Hi)
def create_model(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32, (), name='nb_node')
self.nb_edge = tf.placeholder(tf.int32, (), name='nb_edge')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
self.dropout_p_H = tf.placeholder(tf.float32, (), name='dropout_prob_H')
self.dropout_p_node = tf.placeholder(tf.float32, (), name='dropout_prob_N')
self.dropout_p_edge = tf.placeholder(tf.float32, (), name='dropout_prob_edges')
self.dropout_p_edge_feat = tf.placeholder(tf.float32, (), name='dropout_prob_edgefeat')
self.S = tf.placeholder(tf.float32, name='S')
self.Ssparse = tf.placeholder(tf.int64, name='Ssparse') # indices
self.Sshape = tf.placeholder(tf.int64, name='Sshape') # indices
self.T = tf.placeholder(tf.float32, [None, None], name='T')
self.Tsparse = tf.placeholder(tf.int64, name='Tsparse')
self.F = tf.placeholder(tf.float32, [None, None], name='F')
std_dev_in = float(1.0 / float(self.node_dim))
self.Wnode_layers = []
self.Bnode_layers = []
self.Wed_layers = []
self.Bed_layers = []
self.zed_layers = []
self.Wedge_mlp_layers = []
# Should Project edge as well ...
self.train_var = []
# if self.node_indim!=self.node_dim:
# Wnl0 = tf.Variable(tf.random_uniform((self.node_dim, self.node_indim),
# -1.0 / math.sqrt(self.node_dim),
# 1.0 / math.sqrt(self.node_dim)),name='Wnl0',dtype=tf.float32)
#
self.Wnl0 = tf.Variable(tf.eye(self.node_dim), name='Wnl0', dtype=tf.float32, trainable=self.train_Wn0)
self.Bnl0 = tf.Variable(tf.zeros([self.node_dim]), name='Bnl0', dtype=tf.float32)
        if self.init_fixed:  # for testing purposes: fixed, large edge weights
            self.Wel0 = tf.Variable(100 * tf.ones([int(self.nconv_edge), int(self.edge_dim)]), name='Wel0',
                                    dtype=tf.float32)
        else:
            # self.Wel0 = tf.Variable(tf.random_normal([int(self.nconv_edge),int(self.edge_dim)],mean=0.0,stddev=1.0), dtype=np.float32, name='Wel0')
            self.Wel0 = init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel0')
self.Bel0 = tf.Variable(0.01 * tf.ones([self.nconv_edge]), name='Bel0', dtype=tf.float32)
traceln(' -- Wel0', self.Wel0.get_shape())
self.train_var.extend([self.Wnl0, self.Bnl0])
self.train_var.append(self.Wel0)
if self.stack_instead_add:
self.create_model_stack_convolutions()
else:
self.create_model_sum_convolutions()
self.logits = tf.add(tf.matmul(self.hidden_layers[-1], self.W_classif), self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
# Global L2 Regulization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.predict_proba = tf.nn.softmax(self.logits)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.gv_Gn = []
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver(max_to_keep=0)
traceln(' -- Number of Params: ', self.get_nb_params())
def create_model_old(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.nb_edge = tf.placeholder(tf.int32, (), name='nb_edge')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
#self.EA_input = tf.placeholder(tf.float32, name='EA_input')
#self.NA_input = tf.placeholder(tf.float32, name='NA_input')
self.dropout_p_H = tf.placeholder(tf.float32,(), name='dropout_prob_H')
self.dropout_p_node = tf.placeholder(tf.float32, (), name='dropout_prob_N')
self.dropout_p_edge = tf.placeholder(tf.float32, (), name='dropout_prob_edges')
self.dropout_p_edge_feat = tf.placeholder(tf.float32, (), name='dropout_prob_edgefeat')
self.S = tf.placeholder(tf.float32, name='S')
self.Ssparse = tf.placeholder(tf.int64, name='Ssparse') #indices
self.Sshape = tf.placeholder(tf.int64, name='Sshape') #indices
self.T = tf.placeholder(tf.float32,[None,None], name='T')
self.Tsparse = tf.placeholder(tf.int64, name='Tsparse')
#self.S_indice = tf.placeholder(tf.in, [None, None], name='S')
self.F = tf.placeholder(tf.float32,[None,None], name='F')
#self.NA_indegree = tf.placeholder(tf.float32, name='NA_indegree')
std_dev_in=float(1.0/ float(self.node_dim))
self.Wnode_layers=[]
self.Bnode_layers=[]
self.Wed_layers=[]
self.Bed_layers=[]
#REFACT1 self.zed_layers = []
#REFACT1 self.Wedge_mlp_layers=[]
#Should Project edge as well ...
self.train_var=[]
#if self.node_indim!=self.node_dim:
# Wnl0 = tf.Variable(tf.random_uniform((self.node_dim, self.node_indim),
# -1.0 / math.sqrt(self.node_dim),
# 1.0 / math.sqrt(self.node_dim)),name='Wnl0',dtype=tf.float32)
#else:
self.Wnl0 = tf.Variable(tf.eye(self.node_dim),name='Wnl0',dtype=tf.float32,trainable=self.train_Wn0)
self.Bnl0 = tf.Variable(tf.zeros([self.node_dim]), name='Bnl0',dtype=tf.float32)
#self.Wel0 =tf.Variable(tf.random_normal([int(self.nconv_edge),int(self.edge_dim)],mean=0.0,stddev=1.0), dtype=np.float32, name='Wel0')
if self.init_fixed:
self.Wel0 = tf.Variable(100*tf.ones([int(self.nconv_edge),int(self.edge_dim)]), name='Wel0',dtype=tf.float32)
elif self.use_edge_mlp:
self.Wel0 = init_glorot([int(self.nconv_edge), int(self.edge_mlp_dim)], name='Wel0')
else:
self.Wel0 = init_glorot([int(self.nconv_edge),int(self.edge_dim)],name='Wel0')
self.Bel0 = tf.Variable(0.01*tf.ones([self.nconv_edge]), name='Bel0' , dtype=tf.float32)
#RF self.zel0 = tf.Variable(tf.ones([self.nconv_edge]), name='zel0' , dtype=tf.float32)
#RF self.zH = tf.Variable(tf.ones([self.num_layers]),name='zH',dtype=tf.float32)
traceln(' -- Wel0',self.Wel0.get_shape())
self.train_var.extend([self.Wnl0,self.Bnl0])
self.train_var.append(self.Wel0)
#Parameter for convolving the logits
''' REFACT1
if self.logit_convolve:
#self.Wel_logits = init_glorot([int(self.nconv_edge),int(self.edge_dim)],name='Wel_logit')
#self.Belg = tf.Variable(tf.zeros( [int(self.nconv_edge)]), name='Belogit' , dtype=tf.float32)
self.Wel_logits = tf.Variable(tf.zeros([int(1),int(self.edge_dim)]), name='Wlogit0',dtype=tf.float32,trainable=False)
self.Belg = tf.Variable(tf.ones( [int(1)]), name='Belogit' , dtype=tf.float32)
#self.logits_Transition = 1.0*tf.Variable(tf.ones([int(self.n_classes) , int(self.n_classes)]), name='logit_Transition')
self.logits_Transition=init_glorot([int(self.n_classes), int(self.n_classes)], name='Wel_')
self.Wmlp_edge_0= init_glorot([int(self.edge_dim), int(self.edge_mlp_dim)], name='Wedge_mlp')
self.Bmlp_edge_0= tf.Variable(tf.ones([self.edge_mlp_dim]),name='Wedge_mlp',dtype=tf.float32)
'''
#self.Wed_layers.append(Wel0)
for i in range(self.num_layers-1):
if self.stack_instead_add:
if i==0:
Wnli = tf.Variable(
tf.random_uniform((self.node_dim * self.nconv_edge + self.node_dim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
else:
Wnli =tf.Variable(tf.random_uniform( (self.node_indim*self.nconv_edge+self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)),name='Wnl',dtype=tf.float32)
traceln(' -- Wnli shape',Wnli.get_shape())
elif self.use_conv_weighted_avg:
Wnli = tf.Variable(
tf.random_uniform((self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
#Wnli = tf.eye(self.node_dim,dtype=tf.float32)
traceln(' -- Wnli shape', Wnli.get_shape())
else:
if i==0:
Wnli = tf.Variable(tf.random_uniform((2 * self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl',
dtype=tf.float32)
else:
Wnli =tf.Variable(tf.random_uniform( (2*self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)),name='Wnl',dtype=tf.float32)
Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl'+str(i),dtype=tf.float32)
#Weli = tf.Variable(tf.ones([int(self.nconv_edge),int(self.edge_dim)],dtype=tf.float32))
if self.use_edge_mlp:
#self.Wel0 = init_glorot([int(self.nconv_edge), int(self.edge_mlp_dim)], name='Wel0')
Weli = init_glorot([int(self.nconv_edge), int(self.edge_mlp_dim)], name='Wel_')
Beli = tf.Variable(0.01 * tf.ones([self.nconv_edge]), name='Bel' + str(i), dtype=tf.float32)
#RF Wmlp_edge_i = init_glorot([int(self.edge_dim), int(self.edge_mlp_dim)], name='Wedge_mlp'+str(i))
#RF Bmlp_edge_i = tf.Variable(tf.ones([self.edge_mlp_dim]), name='Bedge_mlp'+str(i), dtype=tf.float32)
#RF self.Wedge_mlp_layers.append(Wmlp_edge_i)
else:
Weli= init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel_')
#Weli = tf.Variable(tf.random_normal([int(self.nconv_edge), int(self.edge_dim)], mean=0.0, stddev=1.0),
# dtype=np.float32, name='Wel_')
Beli = tf.Variable(0.01*tf.ones([self.nconv_edge]), name='Bel'+str(i),dtype=tf.float32)
#RF Wmlp_edge_i = init_glorot([int(self.edge_dim), int(self.edge_mlp_dim)], name='Wedge_mlp' + str(i))
#RF Bmlp_edge_i = tf.Variable(tf.ones([self.edge_mlp_dim]), name='Bedge_mlp' + str(i), dtype=tf.float32)
#RF self.Wedge_mlp_layers.append(Wmlp_edge_i)
#zeli = tf.Variable(tf.ones([self.nconv_edge]),name='zel'+str(i),dtype=tf.float32)
self.Wnode_layers.append(Wnli)
self.Bnode_layers.append(Bnli)
self.Wed_layers.append (Weli)
self.Bed_layers.append(Beli)
#self.zed_layers.append(zeli)
self.train_var.extend((self.Wnode_layers))
self.train_var.extend((self.Wed_layers))
self.Hnode_layers=[]
        #TODO Do we project the first layer or not ?
# Initialize the weights and biases for a simple one full connected network
if self.stack_instead_add:
self.W_classif = tf.Variable(tf.random_uniform((self.node_indim*self.nconv_edge+self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
elif self.use_conv_weighted_avg:
self.W_classif = tf.Variable(tf.random_uniform((self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif", dtype=np.float32)
else:
self.W_classif = tf.Variable(tf.random_uniform((2*self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
self.train_var.append((self.W_classif))
self.train_var.append((self.B_classif))
#Use for true add
#I = tf.eye(self.nb_node)
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node], dtype=tf.float32), 1 - self.dropout_p_node)
self.ND = tf.diag(self.node_dropout_ind)
edge_dropout = self.dropout_rate_edge> 0.0 or self.dropout_rate_edge_feat > 0.0
traceln(' -- Edge Dropout',edge_dropout, self.dropout_rate_edge,self.dropout_rate_edge_feat)
if self.num_layers==1:
self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
self.hidden_layers = [self.H]
traceln("H shape",self.H.get_shape())
P = self.fastconvolve(self.Wel0,self.Bel0,self.F,self.Ssparse,self.Tsparse,self.H,self.nconv_edge,self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat,stack=self.stack_instead_add,use_dropout=edge_dropout)
Hp = tf.concat([self.H, P], 1)
#Hp= P+self.H
Hi=self.activation(Hp)
#Hi_shape = Hi.get_shape()
#traceln(Hi_shape)
self.hidden_layers.append(Hi)
elif self.num_layers>1:
if self.dropout_rate_node>0.0:
H0 = self.activation(tf.matmul(self.ND,tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0)))
else:
H0 = self.activation(tf.add(tf.matmul(self.node_input,self.Wnl0),self.Bnl0))
self.Hnode_layers.append(H0)
#TODO Default to fast convolve but we change update configs, train and test flags
P = self.fastconvolve(self.Wel0,self.Bel0, self.F, self.Ssparse, self.Tsparse, H0, self.nconv_edge, self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout,
)
#RF zwe=self.zel0,
#RF use_weighted_average=self.use_conv_weighted_avg,
#RF use_edge_mlp=self.use_edge_mlp,
#RFWedge_mlp=self.Wmlp_edge_0,
#RF Bedge_mlp=self.Bmlp_edge_0)
if self.use_conv_weighted_avg:
Hp = self.zH[0] * H0 + P
else:
Hp = tf.concat([H0, P], 1)
#TODO add activation Here.
#self.hidden_layers = [self.activation(Hp)]
self.hidden_layers = [Hp]
for i in range(self.num_layers-1):
if self.dropout_rate_H > 0.0:
Hi_ = tf.nn.dropout(tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i], 1-self.dropout_p_H)
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i]
if self.residual_connection:
Hi_= tf.add(Hi_,self.Hnode_layers[-1])
self.Hnode_layers.append(Hi_)
# traceln(' -- Hi_shape',Hi_.get_shape())
# traceln(' -- Hi prevous shape',self.hidden_layers[-1].get_shape())
P = self.fastconvolve(self.Wed_layers[i],self.Bed_layers[i], self.F, self.Ssparse, self.Tsparse, Hi_, self.nconv_edge,self.Sshape, self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout,
)
# zwe=self.zed_layers[i],
# use_weighted_average=self.use_conv_weighted_avg,
# use_edge_mlp=self.use_edge_mlp,
# Wedge_mlp=self.Wedge_mlp_layers[i],
#RF Bedge_mlp=self.Bmlp_edge_0)
if self.use_conv_weighted_avg:
Hp = self.zH[i+1]* Hi_ + P
else:
Hp = tf.concat([Hi_, P], 1)
Hi = self.activation(Hp)
self.hidden_layers.append(Hi)
self.logits =tf.add(tf.matmul(self.hidden_layers[-1],self.W_classif),self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
# Global L2 Regulization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.predict_proba = tf.nn.softmax(self.logits)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.gv_Gn=[]
#TODO Experiment with gradient noise
#if self.stack_instead_add:
# for grad, var in self.grads_and_vars:
# traceln(grad,var)
# if grad is not None:
# self.gv_Gn.append( ( tf.add(grad, tf.random_normal(tf.shape(grad), stddev=0.00001)),var) )
        #self.gv_Gn = [(tf.add(grad, tf.random_normal(tf.shape(grad), stddev=0.00001)), val) for grad, val in self.grads_and_vars if grad is not None]
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver= tf.train.Saver(max_to_keep=0)
traceln(' -- Number of Params: ', self.get_nb_params())
def save_model(self, session, model_filename):
traceln("Saving Model")
save_path = self.saver.save(session, model_filename)
def restore_model(self, session, model_filename):
self.saver.restore(session, model_filename)
traceln("Model restored.")
def get_Wedge(self,session):
'''
Return the parameters for the Edge Convolutions
:param session:
:return:
'''
if self.num_layers>1:
L0=session.run([self.Wel0,self.Wed_layers])
We0=L0[0]
list_we=[We0]
for we in L0[1]:
list_we.append(we)
return list_we
else:
L0=session.run([self.Wel0])
We0=L0[0]
list_we=[We0]
return list_we
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln(' -- Train',X.shape,EA.shape)
#traceln(' -- DropoutEdges',self.dropout_rate_edge)
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
self.F: graph.F,
self.y_input: graph.Y,
self.dropout_p_H: self.dropout_rate_H,
self.dropout_p_node: self.dropout_rate_node,
self.dropout_p_edge: self.dropout_rate_edge,
self.dropout_p_edge_feat: self.dropout_rate_edge_feat,
#self.NA_indegree:graph.NA_indegree
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
self.F: graph.F,
self.y_input: graph.Y,
self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_edge: 0.0,
self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
# fast_gcn.T: np.asarray(graph.T.todense()).squeeze(),
self.F: graph.F,
self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_edge: 0.0,
self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
            traceln(' -- ', str(Ops))
return Ops[0]
def prediction_prob(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
# fast_gcn.T: np.asarray(graph.T.todense()).squeeze(),
self.F: graph.F,
self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_edge: 0.0,
self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops = session.run([self.predict_proba], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
def train_All_lG(self,session,graph_train,graph_val, max_iter, eval_iter = 10, patience = 7, graph_test = None, save_model_path = None):
'''
        Merge all the graphs and train on them
        :param session:
        :param graph_train: the list of graphs to train on
        :param graph_val: the list of graphs used for validation
        :param max_iter: maximum number of epochs
        :param eval_iter: evaluate every eval_iter epochs
        :param patience: stop training if accuracy has not improved on the validation set after patience evaluations
        :param graph_test: Optional. If a test set is provided, then accuracy on the test set is reported
        :param save_model_path: checkpoints filename to save the model.
        :return: A dictionary with training accuracies, validation accuracies and test accuracies if any, and the Wedge parameters
'''
best_val_acc = 0.0
wait = 0
stop_training = False
stopped_iter = max_iter
train_accuracies = []
validation_accuracies = []
test_accuracies = []
conf_mat = []
start_monitoring_val_acc = False
        # Not efficient to recompute this merge repeatedly; all training graphs are merged once up front
merged_graph = gcn_datasets.GCNDataset.merge_allgraph(graph_train)
self.train(session, merged_graph, n_iter=1)
for i in range(max_iter):
if stop_training:
break
if i % eval_iter == 0:
traceln('\n -- Epoch', i)
_, tr_acc = self.test_lG(session, graph_train, verbose=False)
traceln(' -- Train Acc', '%.4f' % tr_acc)
train_accuracies.append(tr_acc)
_, node_acc = self.test_lG(session, graph_val, verbose=False)
traceln(' -- Valid Acc', '%.4f' % node_acc)
validation_accuracies.append(node_acc)
if save_model_path:
save_path = self.saver.save(session, save_model_path, global_step=i)
if graph_test:
_, test_acc = self.test_lG(session, graph_test, verbose=False)
traceln(' -- Test Acc', '%.4f' % test_acc)
test_accuracies.append(test_acc)
# Ypred = self.predict_lG(session, graph_test,verbose=False)
# Y_true_flat = []
# Ypred_flat = []
# for graph, ypred in zip(graph_test, Ypred):
# ytrue = np.argmax(graph.Y, axis=1)
# Y_true_flat.extend(ytrue)
# Ypred_flat.extend(ypred)
# cm = sklearn.metrics.confusion_matrix(Y_true_flat, Ypred_flat)
# conf_mat.append(cm)
# TODO min_delta
# if tr_acc>0.99:
# start_monitoring_val_acc=True
if node_acc > best_val_acc:
best_val_acc = node_acc
wait = 0
else:
if wait >= patience:
stopped_iter = i
stop_training = True
wait += 1
else:
self.train(session, merged_graph, n_iter=1)
# Final Save
# if save_model_path:
# save_path = self.saver.save(session, save_model_path, global_step=i)
# TODO Add the final step
mean_acc = []
traceln(' -- Stopped Model Training after ', stopped_iter)
traceln(' -- Val Accuracies ', validation_accuracies)
traceln(' -- Final Training Accuracy')
_, node_train_acc = self.test_lG(session, graph_train)
traceln(' -- Train Mean Accuracy', '%.4f' % node_train_acc)
traceln(' -- Final Valid Acc')
self.test_lG(session, graph_val)
R = {}
R['train_acc'] = train_accuracies
R['val_acc'] = validation_accuracies
R['test_acc'] = test_accuracies
R['stopped_iter'] = stopped_iter
R['confusion_matrix'] = conf_mat
# R['W_edge'] =self.get_Wedge(session)
if graph_test:
_, final_test_acc = self.test_lG(session, graph_test)
traceln(' -- Final Test Acc', '%.4f' % final_test_acc)
R['final_test_acc'] = final_test_acc
return R
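# A hedged usage sketch for train_All_lG above (assuming `model` is an already
# configured instance of the model class, and lg_train / lg_val are lists of
# GCNDataset graphs; TF1-style session handling):
#
#   model.create_model()
#   with tf.Session() as session:
#       session.run(model.init)
#       R = model.train_All_lG(session, lg_train, lg_val,
#                              max_iter=1000, eval_iter=10, patience=7)
#       traceln(' -- best validation accuracy:', max(R['val_acc']))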
class GraphConvNet(MultiGraphNN):
'''
A Deep Standard GCN model for a graph list
'''
def __init__(self,node_dim,nb_classes,num_layers=1,learning_rate=0.1,mu=0.1,node_indim=-1,
dropout_rate=0.0,dropout_mode=0):
self.node_dim=node_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.convolve_last=False
self.dropout_rate=dropout_rate
        # dropout_mode: 0 = no dropout; 1 = node dropout at the input; 2 = standard dropout per layer
        # (check the logit layer)
        self.dropout_mode=dropout_mode
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
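    # Layer rule implemented in create_model below (concat variant):
    #   H^{l+1} = sigma( [ H^l . W_l  ;  NA . (H^l . W_l) ] )
    # where NA is the externally supplied normalized adjacency matrix.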
def create_model(self):
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
self.NA_input = tf.placeholder(tf.float32, name='NA_input') #Normalized Adjacency Matrix Here
self.dropout_p = tf.placeholder(tf.float32,(), name='dropout_prob')
#std_dev_in=float(1.0/ float(self.node_dim))
self.Wnode_layers=[]
self.Bnode_layers=[]
std_dev_input=float(1.0/ float(self.node_dim))
std_dev_indim=float(1.0/ float(self.node_indim))
if self.node_indim!=self.node_dim:
self.Wnode = init_glorot((self.node_dim,self.node_indim),name='Wn0')
#self.Wnode = init_normal((self.node_dim, self.node_indim),std_dev_input,name='Wn0')
else:
self.Wnode = tf.Variable(tf.eye(self.node_dim),name='Wn0',dtype=tf.float32)
self.Bnode = tf.Variable(tf.zeros([self.node_indim]), name='Bnode',dtype=tf.float32)
for i in range(self.num_layers-1):
Wnli =init_glorot((2*self.node_indim, self.node_indim),name='Wnl'+str(i))
#Wnli = init_normal((self.node_indim, self.node_indim),std_dev_indim, name='Wnl' + str(i))
self.Wnode_layers.append(Wnli)
            #The GCN does not seem to use a bias term
#Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl'+str(i),dtype=tf.float32)
#self.Bnode_layers.append(Bnli)
self.W_classif = init_glorot((2*self.node_indim, self.n_classes),name="W_classif")
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
#Input Layer
        #Check the self-loop: is it included in the normalized adjacency matrix?
        #Check residual connections as well for deeper models
        #add a dropout placeholder ... to differentiate train and test
        #x = tf.nn.dropout(x, 1 - self.dropout)
        #Dropout some nodes at the input of the graph?
        #Should I dropout in upper layers as well?
        #This forces the net to infer the node labels from its neighbors only
        #Here I dropout the features, but not the edges ..
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node],dtype=tf.float32),1-self.dropout_p)
self.ND = tf.diag(self.node_dropout_ind)
        if self.dropout_mode==1:
            #self.H = self.activation(tf.matmul(self.ND,tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode)))
            P0 = self.activation(tf.matmul(self.ND, tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode)))
            self.hidden_layers = [P0]  # the node-dropped, activated first layer
else:
H0 =tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode)
P0 =tf.matmul(self.NA_input, H0) # we should forget the self loop
H0_ = self.activation(tf.concat([H0, P0], 1))
self.hidden_layers=[H0_]
#self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode))
#self.hidden_layers = [self.H]
for i in range(self.num_layers-1):
if self.dropout_mode==2:
Hp = tf.nn.dropout(self.hidden_layers[-1],1-self.dropout_p)
Hi_ = tf.matmul(Hp, self.Wnode_layers[i])
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i])
P =tf.matmul(self.NA_input, Hi_) #we should forget the self loop
#Hp = tf.concat([H0, P], 1)
Hi = self.activation(tf.concat([Hi_,P],1))
self.hidden_layers.append(Hi)
#This dropout the logits as in GCN
if self.dropout_mode==2:
Hp = tf.nn.dropout(self.hidden_layers[-1], 1 - self.dropout_p)
self.hidden_layers.append(Hp)
if self.convolve_last is True:
logit_0 = tf.add(tf.matmul(self.hidden_layers[-1], self.W_classif), self.B_classif)
self.logits = tf.matmul(self.NA_input,logit_0) #No activation function here
else:
self.logits =tf.add(tf.matmul(self.hidden_layers[-1],self.W_classif),self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
#Global L2 Regulization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(self.logits), 1), tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
traceln(' -- Number of Params: ', self.get_nb_params())
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
def train(self,session,g,n_iter=1,verbose=False):
#TrainEvalSet Here
for i in range(n_iter):
feed_batch={
self.nb_node:g.X.shape[0],
self.node_input:g.X,
self.y_input:g.Y,
self.NA_input:g.NA,
self.dropout_p:self.dropout_rate
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,g,verbose=True):
#TrainEvalSet Here
feed_batch={
self.nb_node:g.X.shape[0],
self.node_input:g.X,
self.y_input:g.Y,
self.NA_input:g.NA,
self.dropout_p: 0.0
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
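# The GCN above consumes NA_input, a normalized adjacency matrix computed
# outside this class. A minimal sketch of the usual symmetric normalization
# NA = D^-1/2 (A + I) D^-1/2 (an assumption about the preprocessing, which is
# not shown in this file; the inline comments above suggest the self-loop may
# instead be dropped, since H is concatenated with NA.H):
#
#   import scipy.sparse as sp
#   def normalized_adjacency(A):
#       A_hat = A + sp.eye(A.shape[0])                 # add self-loops
#       d = np.asarray(A_hat.sum(axis=1)).flatten()
#       D_inv_sqrt = sp.diags(np.power(d, -0.5))
#       return D_inv_sqrt.dot(A_hat).dot(D_inv_sqrt)   # D^-1/2 A_hat D^-1/2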
class EdgeLogit(Logit):
'''
Logistic Regression for MultiGraph
'''
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln(' -- Train',X.shape,EA.shape)
            nb_edge = graph.E.shape[0]
            half_edge = nb_edge // 2  # integer index (only used by the commented-out half-edge variants)
feed_batch = {
self.nb_node: graph.EC.shape[0], #here we pass the number of edges
self.node_input: graph.EC,
self.y_input: graph.Yedge,
#self.nb_node: half_edge, #here we pass the number of edges
#self.node_input: graph.F[:half_edge],
#self.y_input: graph.Yedge[:half_edge],
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
        nb_edge = graph.E.shape[0]
        half_edge = nb_edge // 2
feed_batch = {
self.nb_node: graph.EC.shape[0],
self.node_input: graph.EC,
self.y_input: graph.Yedge,
#self.nb_node: half_edge, # here we pass the number of edges
#self.node_input: graph.F[:half_edge],
#self.y_input: graph.Yedge[:half_edge],
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
        nb_edge = graph.E.shape[0]
        half_edge = nb_edge // 2
feed_batch = {
self.nb_node: graph.EC.shape[0],
self.node_input: graph.EC,
#self.nb_node: half_edge, # here we pass the number of edges
#self.node_input: graph.F[:half_edge],
#self.y_input: graph.Yedge[:, half_edge],
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
#TODO Benchmark on Snake, GCN, ECN, graphAttNet vs Cora
#TODO Refactorize Code
#TODO Add L2 Regularization
#TODO Stack or Add Convolution -> Reduce the size
# Force the attention to preserve the node information i.e alpha'= 0.8 I +0.2 alpha
# Force doing attention only for the logit ?
# with a two layer
# Branching factor --> Attention
# Logit Layer and Attention
# There is one difference with ECN: the edge features are dynamically calculated,
# whereas for GAT they are conditioned on the current node features
# 0.88
# Do a dot-product attention or something different ...
# Change initialization of the attention vector
# with an attention vector equal to [x;0] the attention keeps the source features and does not propagate ...
# Should reduce the number of parameters
# This notion of edges is completely arbitrary
# We could look at all nodes in the graph to see whether there are some dependencies; that would be an interesting experiment
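# A minimal sketch of the convex-combination idea above (an assumption, not an
# implemented option): blend the attention matrix with the identity so each
# node keeps a fixed share of its own features.
#
#   def blended_attention(alphas, keep=0.8):
#       # alphas: dense [nb_node, nb_node] row-stochastic attention matrix
#       n = tf.shape(alphas)[0]
#       return keep * tf.eye(n) + (1.0 - keep) * alphas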
class GraphAttNet(MultiGraphNN):
'''
Graph Attention Network
'''
# Variable ignored by the set_learning_options
_setter_variables = {
"node_dim": True, "edge_dim": True, "nb_class": True,
"num_layers": True, "lr": True,
"node_indim": True, "nb_attention": True,
"nb_iter": True, "ratio_train_val": True}
def __init__(self,node_dim,nb_classes,num_layers=1,learning_rate=0.1,node_indim=-1,nb_attention=3
):
self.node_dim=node_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.elu
#self.activation=tf.nn.relu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.residual_connection=False#deprecated
self.mu=0.0
self.dropout_rate_node = 0.0
self.dropout_rate_attention = 0.0
self.nb_attention=nb_attention
self.distinguish_node_from_neighbor=False
self.original_model=False
self.attn_type=0
self.dense_model=False
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
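    # Attention rule implemented by the layers below (one attention head):
    #   e_ij     = LeakyReLU( a_fwd . W h_i + a_bwd . W h_j )
    #   alpha_ij = softmax_j(e_ij)        (sparse softmax over the graph edges)
    #   h'_i     = sum_j alpha_ij (W h_j)
    # Heads are concatenated; the node's own projection can be appended when
    # distinguish_node_from_neighbor is set.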
#TODO GENERIC Could be move in MultigraphNN
def set_learning_options(self,dict_model_config):
"""
        Set all learning options that are not directly accessible from the constructor
        :param dict_model_config: dictionary mapping option names to values
:return:
"""
traceln(dict_model_config)
for attrname,val in dict_model_config.items():
            #We treat the activation function differently as we cannot pickle/serialize a python function
if attrname=='activation_name':
if val=='relu':
self.activation=tf.nn.relu
elif val=='tanh':
self.activation=tf.nn.tanh
else:
raise Exception('Invalid Activation Function')
if attrname=='stack_instead_add' or attrname=='stack_convolutions':
self.stack_instead_add=val
if attrname not in self._setter_variables:
try:
traceln(' -- set',attrname,val)
setattr(self,attrname,val)
except AttributeError:
warnings.warn("Ignored options for ECN"+attrname+':'+val)
def dense_graph_attention_layer(self,H,W,A,nb_node,dropout_attention,dropout_node,use_dropout=False):
'''
Implement a dense attention layer where every node is connected to everybody
:param A:
:param H:
:param W:
:param dropout_attention:
:param dropout_node:
:param use_dropout:
:return:
'''
        '''
        For all i,j compute a.Hi + b.Hj:
        tile the per-node scores so that one term varies per row and the other per column;
        the diagonal may be a special case
        '''
with tf.name_scope('graph_att_dense_attn'):
P = tf.matmul(H, W)
Aij_forward = tf.expand_dims(A[0], 0) # attention vector for forward edge and backward edge
Aij_backward = tf.expand_dims(A[1], 0) # Here we assume it is the same on contrary to the paper
# Compute the attention weight for target node, ie a . Whj if j is the target node
att_target_node = tf.matmul(P, Aij_backward,transpose_b=True)
# Compute the attention weight for the source node, ie a . Whi if j is the target node
att_source_node = tf.matmul(P, Aij_forward, transpose_b=True)
            # Broadcast so that Att[i,j] = a_fwd.Wh_i + a_bwd.Wh_j, mirroring the
            # sparse layer below: the source score varies per row, the target score per column
            Asrc = tf.tile(att_source_node, [1, nb_node])
            Atgt = tf.transpose(tf.tile(att_target_node, [1, nb_node]))
            Att = tf.nn.leaky_relu(Asrc + Atgt)
#Att = tf.nn.leaky_relu(Asrc)
alphas = tf.nn.softmax(Att)
# dropout is done after the softmax
if use_dropout:
traceln(' -- ... using dropout for attention layer')
alphasD = tf.nn.dropout(alphas, 1.0 - dropout_attention)
P_D = tf.nn.dropout(P, 1.0 - dropout_node)
alphasP = tf.matmul(alphasD, P_D)
return alphasD, alphasP
else:
# We compute the features given by the attentive neighborhood
alphasP = tf.matmul(alphas, P)
return alphas, alphasP
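    # A quick numpy check of the broadcasting used above (illustrative only):
    #
    #   import numpy as np
    #   asrc = np.array([[1.], [2.], [3.]])      # per-node source scores
    #   atgt = np.array([[10.], [20.], [30.]])   # per-node target scores
    #   Att = np.tile(asrc, (1, 3)) + np.tile(atgt, (1, 3)).T
    #   # Att[i, j] == asrc[i] + atgt[j], e.g. Att[0, 2] == 31.0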
#TODO Change the transpose of the A parameter
def simple_graph_attention_layer(self,H,W,A,S,T,Adjind,Sshape,nb_edge,
dropout_attention,dropout_node,
use_dropout=False,add_self_loop=False,attn_type=0):
'''
:param H: The current node feature
:param W: The node projection for this layer
:param A: The attention weight vector: a
:param S: The source edge matrix indices
:param T: The target edge matrix indices
        :param Adjind: The adjacency matrix indices
        :param Sshape: Shape of S
        :param nb_edge: Number of edges
        :param dropout_attention: dropout rate for the attention
        :param dropout_node: dropout rate for the projected node features
        :param use_dropout: whether to use dropout
        :param add_self_loop: whether to add the edge (i,i)
        :return: alphas, nH
            where alphas is the attention-based adjacency matrix (alphas[i,j] corresponds to alpha_ij)
            and nH contains the new node features for this layer, ie alphas.(H.W)
        '''
with tf.name_scope('graph_att_net_attn'):
# This has shape (nb_node,in_dim) and correspond to the project W.h in the paper
P=tf.matmul(H,W)
#traceln(P.get_shape())
#This has shape #shape,(nb_edge,nb_node)
#This sparse tensor contains target nodes for edges.
#The indices are [edge_idx,node_target_index]
Tr = tf.SparseTensor(indices=T, values=tf.ones([nb_edge], dtype=tf.float32),
dense_shape=[Sshape[1], Sshape[0]])
Tr = tf.sparse_reorder(Tr) # reorder so that sparse operations work correctly
# This tensor has shape(nb_edge,in_dim) and contains the node target projection, ie Wh
TP = tf.sparse_tensor_dense_matmul(Tr, P,name='TP')
# This has shape #shape,(nb_node,nb_edge)
# This sparse tensor contains source nodes for edges.
# The indices are [node_source_index,edge_idx]
SD = tf.SparseTensor(indices=S, values=tf.ones([nb_edge],dtype=tf.float32), dense_shape=Sshape)
SD = tf.sparse_reorder(SD) #shape,(nb_edge,nb_node)
# This tensor has shape(nb_edge,in_dim) and contains the node source projection, ie Wh
SP = tf.sparse_tensor_dense_matmul(tf.sparse_transpose(SD), P,name='SP') #shape(nb_edge,in_dim)
#traceln(' -- SP', SP.get_shape())
#Deprecated
if attn_type==1:
#Mutlitplication Attn Module
Aij_forward = A # attention vector for forward edge and backward edge
Aij_backward = A # Here we assume it is the same on contrary to the paper
# Compute the attention weight for target node, ie a . Whj if j is the target node
att_target_node = tf.multiply(TP, Aij_forward[0])
# Compute the attention weight for the source node, ie a . Whi if j is the target node
att_source_node = tf.multiply(SP, Aij_backward[0])
# The attention values for the edge ij is the sum of attention of node i and j
# Attn( node_i, node_j) = Sum_k (a_k)^2 Hik Hjk Is this what we want ?
att_source_target_node = tf.reduce_sum( tf.multiply(att_source_node,att_target_node),axis=1)
attn_values = tf.nn.leaky_relu( att_source_target_node)
#
elif attn_type==2:
#Inspired by learning to rank approach on w(x+-x-)
# Attn( node_i, node_j) = Sum_k (a_k) (Hik- Hjk) Is this what we want ?
att_source_target_node = tf.reduce_sum( tf.multiply(SP-TP,A[0]),axis=1)
attn_values = tf.nn.leaky_relu( att_source_target_node)
else:
Aij_forward=tf.expand_dims(A[0],0) # attention vector for forward edge and backward edge
Aij_backward=tf.expand_dims(A[1],0) # Here we assume it is the same on contrary to the paper
# Compute the attention weight for target node, ie a . Whj if j is the target node
att_target_node =tf.matmul(TP,Aij_backward,transpose_b=True)
# Compute the attention weight for the source node, ie a . Whi if j is the target node
att_source_node = tf.matmul(SP,Aij_forward,transpose_b=True)
# The attention values for the edge ij is the sum of attention of node i and j
attn_values = tf.nn.leaky_relu(tf.squeeze(att_target_node) + tf.squeeze(att_source_node))
# From that we build a sparse adjacency matrix containing the correct values
# which we then feed to a sparse softmax
AttAdj = tf.SparseTensor(indices=Adjind, values=attn_values, dense_shape=[Sshape[0], Sshape[0]])
AttAdj = tf.sparse_reorder(AttAdj)
            #Not very efficient to do this here; we should add the self-loop in the preprocessing
            if add_self_loop:
                node_indices=tf.range(Sshape[0])
                #Sparse identity
Aij_forward = tf.expand_dims(A[0], 0)
id_indices = tf.stack([node_indices, node_indices], axis=1)
val =tf.squeeze(tf.matmul(P,Aij_forward,transpose_b=True))
spI = tf.SparseTensor(indices=id_indices,values=2.0*val,dense_shape=[Sshape[0], Sshape[0]])
AttAdj_I = tf.sparse_add(AttAdj,spI)
alphas = tf.sparse_softmax(AttAdj_I)
else:
alphas = tf.sparse_softmax(AttAdj)
#dropout is done after the softmax
if use_dropout:
traceln(' -- ... using dropout for attention layer')
alphasD = tf.SparseTensor(indices=alphas.indices,values=tf.nn.dropout(alphas.values, 1.0 - dropout_attention),dense_shape=alphas.dense_shape)
P_D =tf.nn.dropout(P,1.0-dropout_node)
alphasP = tf.sparse_tensor_dense_matmul(alphasD, P_D)
return alphasD, alphasP
else:
#We compute the features given by the attentive neighborhood
alphasP = tf.sparse_tensor_dense_matmul(alphas,P)
return alphas,alphasP
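    # A minimal sketch (an assumption about the dataset preprocessing, which is
    # not shown here) of how the sparse indices consumed above can be built
    # from an edge list E of (source, target) node pairs:
    #
    #   E = np.array([[0, 1], [1, 2]])
    #   edge_idx = np.arange(E.shape[0])
    #   Sind = np.stack([E[:, 0], edge_idx], axis=1)       # [node_src, edge]
    #   Tind = np.stack([edge_idx, E[:, 1]], axis=1)       # [edge, node_tgt]
    #   Aind = np.stack([Sind[:, 0], Tind[:, 1]], axis=1)  # as used in train()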
def _create_original_model(self):
std_dev_in = float(1.0 / float(self.node_dim))
self.use_dropout = self.dropout_rate_attention > 0 or self.dropout_rate_node > 0
self.hidden_layer = []
attns0 = []
# Define the First Layer from the Node Input
for a in range(self.nb_attention):
# H0 = Maybe do a common H0 and have different attention parameters
# Change the attention, maybe ?
# Do multiplicative
# How to add edges here
# Just softmax makes a differences
# I could stack [current_node,representation; edge_features;] and do a dot product on that
Wa = init_glorot([int(self.node_dim), int(self.node_indim)], name='Wa0' + str(a))
va = init_glorot([2, int(self.node_indim)], name='va0' + str(a))
if self.distinguish_node_from_neighbor:
H0 = tf.matmul(self.node_input, Wa)
attns0.append(H0)
_, nH = self.simple_graph_attention_layer(self.node_input, Wa, va, self.Ssparse, self.Tsparse, self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=True)
attns0.append(nH)
self.hidden_layer.append(
self.activation(tf.concat(attns0, axis=-1))) # Now dims should be indim*self.nb_attention
# Define Intermediate Layers
for i in range(1, self.num_layers):
attns = []
for a in range(self.nb_attention):
if self.distinguish_node_from_neighbor:
Wia = init_glorot(
[int(self.node_indim * self.nb_attention + self.node_indim), int(self.node_indim)],
name='Wa' + str(i) + '_' + str(a))
else:
Wia = init_glorot([int(self.node_indim * self.nb_attention), int(self.node_indim)],
name='Wa' + str(i) + '_' + str(a))
via = init_glorot([2, int(self.node_indim)], name='va' + str(i) + '_' + str(a))
_, nH = self.simple_graph_attention_layer(self.hidden_layer[-1], Wia, via, self.Ssparse, self.Tsparse,
self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=True)
attns.append(nH)
self.hidden_layer.append(self.activation(tf.concat(attns, axis=-1)))
# Define Logit Layer
out = []
        for i in range(self.nb_attention):
            #for i in range(1):
            logits_a = init_glorot([int(self.node_indim * self.nb_attention), int(self.n_classes)],
                                   name='Logita' + '_' + str(i))
            via = init_glorot([2, int(self.n_classes)], name='LogitA' + '_' + str(i))
_, nL = self.simple_graph_attention_layer(self.hidden_layer[-1], logits_a, via, self.Ssparse, self.Tsparse,
self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=True)
out.append(nL)
self.logits = tf.add_n(out) / self.nb_attention
#self.logits = out[0]
def _create_nodedistint_model(self):
        '''
        Create a model that keeps the node's own representation distinct from its neighbors'
        :return:
        '''
std_dev_in = float(1.0 / float(self.node_dim))
self.use_dropout = self.dropout_rate_attention > 0 or self.dropout_rate_node > 0
self.hidden_layer = []
attns0 = []
# Define the First Layer from the Node Input
Wa = tf.eye(int(self.node_dim), name='I0')
H0 = tf.matmul(self.node_input, Wa)
attns0.append(H0)
I = tf.Variable(tf.eye(self.node_dim), trainable=False)
for a in range(self.nb_attention):
# H0 = Maybe do a common H0 and have different attention parameters
# Change the attention, maybe ?
# Do multiplicative
# How to add edges here
# Just softmax makes a differences
# I could stack [current_node,representation; edge_features;] and do a dot product on that
va = init_glorot([2, int(self.node_dim)], name='va0' + str(a))
_, nH = self.simple_graph_attention_layer(H0, I, va, self.Ssparse, self.Tsparse, self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=False,attn_type=self.attn_type)
attns0.append(nH)
self.hidden_layer.append(
self.activation(tf.concat(attns0, axis=-1))) # Now dims should be indim*self.nb_attention
# Define Intermediate Layers
for i in range(1, self.num_layers):
attns = []
            if i == 1:
                previous_layer_dim = int(self.node_dim * self.nb_attention + self.node_dim)
            else:
                previous_layer_dim = int(self.node_indim * self.nb_attention + self.node_indim)
            Wia = init_glorot([previous_layer_dim, int(self.node_indim)], name='Wa' + str(i))
Hi = tf.matmul(self.hidden_layer[-1], Wia)
attns.append(Hi)
Ia = tf.Variable(tf.eye(self.node_indim), trainable=False)
for a in range(self.nb_attention):
via = init_glorot([2, int(self.node_indim)], name='va' + str(i) + '_' + str(a))
_, nH = self.simple_graph_attention_layer(Hi, Ia, via, self.Ssparse, self.Tsparse,
self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=False,attn_type=self.attn_type)
attns.append(nH)
self.hidden_layer.append(self.activation(tf.concat(attns, axis=-1)))
# Define Logit Layer
#TODO Add Attention on Logit Layer
#It would not cost too much to add an attn mecha once I get the logits
#If x,y are indicated in the node feature then we can implicitly find the type of edges that we are using ...
        if self.num_layers>1:
            logits_a = init_glorot([int(self.node_indim * self.nb_attention + self.node_indim), int(self.n_classes)],
                                   name='Logita')
        else:
            logits_a = init_glorot([int(self.node_dim * self.nb_attention + self.node_dim), int(self.n_classes)],
                                   name='Logita')
        Bc = tf.ones([int(self.n_classes)], name='Blogit')
# self.logits = tf.add_n(out) / self.nb_attention
self.logits = tf.matmul(self.hidden_layer[-1],logits_a) +Bc
def _create_densegraph_model(self):
        '''
        Create a dense attention model where every node attends to every other node
        :return:
        '''
std_dev_in = float(1.0 / float(self.node_dim))
self.use_dropout = self.dropout_rate_attention > 0 or self.dropout_rate_node > 0
self.hidden_layer = []
attns0 = []
# Define the First Layer from the Node Input
Wa = tf.eye(int(self.node_dim), name='I0')
H0 = tf.matmul(self.node_input, Wa)
attns0.append(H0)
I = tf.Variable(tf.eye(self.node_dim), trainable=False)
for a in range(self.nb_attention):
# H0 = Maybe do a common H0 and have different attention parameters
# Change the attention, maybe ?
# Do multiplicative
# How to add edges here
# Just softmax makes a differences
# I could stack [current_node,representation; edge_features;] and do a dot product on that
va = init_glorot([2, int(self.node_dim)], name='va0' + str(a))
_, nH = self.dense_graph_attention_layer(H0, I, va, self.nb_node, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout)
attns0.append(nH)
self.hidden_layer.append(
self.activation(tf.concat(attns0, axis=-1))) # Now dims should be indim*self.nb_attention
# Define Intermediate Layers
for i in range(1, self.num_layers):
attns = []
            if i == 1:
                previous_layer_dim = int(self.node_dim * self.nb_attention + self.node_dim)
            else:
                previous_layer_dim = int(self.node_indim * self.nb_attention + self.node_indim)
            Wia = init_glorot([previous_layer_dim, int(self.node_indim)], name='Wa' + str(i))
Hi = tf.matmul(self.hidden_layer[-1], Wia)
attns.append(Hi)
Ia = tf.Variable(tf.eye(self.node_indim), trainable=False)
for a in range(self.nb_attention):
via = init_glorot([2, int(self.node_indim)], name='va' + str(i) + '_' + str(a))
_, nH = self.dense_graph_attention_layer(Hi, Ia, via, self.nb_node,
self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout)
attns.append(nH)
self.hidden_layer.append(self.activation(tf.concat(attns, axis=-1)))
# Define Logit Layer
#TODO Add Attention on Logit Layer
#It would not cost too much to add an attn mecha once I get the logits
#If x,y are indicated in the node feature then we can implicitly find the type of edges that we are using ...
        if self.num_layers>1:
            logits_a = init_glorot([int(self.node_indim * self.nb_attention + self.node_indim), int(self.n_classes)],
                                   name='Logita')
        else:
            logits_a = init_glorot([int(self.node_dim * self.nb_attention + self.node_dim), int(self.n_classes)],
                                   name='Logita')
        Bc = tf.ones([int(self.n_classes)], name='Blogit')
# self.logits = tf.add_n(out) / self.nb_attention
self.logits = tf.matmul(self.hidden_layer[-1],logits_a) +Bc
def create_model(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.nb_edge = tf.placeholder(tf.int32, (), name='nb_edge')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
#self.dropout_p_H = tf.placeholder(tf.float32,(), name='dropout_prob_H')
self.dropout_p_node = tf.placeholder(tf.float32, (), name='dropout_prob_N')
self.dropout_p_attn = tf.placeholder(tf.float32, (), name='dropout_prob_edges')
self.S = tf.placeholder(tf.float32, name='S')
self.Ssparse = tf.placeholder(tf.int64, name='Ssparse') #indices
self.Sshape = tf.placeholder(tf.int64, name='Sshape') #indices
        self.Aind = tf.placeholder(tf.int64, name='Aind') #Adjacency indices
self.T = tf.placeholder(tf.float32,[None,None], name='T')
self.Tsparse = tf.placeholder(tf.int64, name='Tsparse')
#self.S_indice = tf.placeholder(tf.in, [None, None], name='S')
#self.F = tf.placeholder(tf.float32,[None,None], name='F')
if self.original_model:
self._create_original_model()
elif self.dense_model:
self._create_densegraph_model()
else:
self._create_nodedistint_model()
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
self.loss = tf.reduce_mean(cross_entropy_source)
self.predict_proba = tf.nn.softmax(self.logits)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver= tf.train.Saver(max_to_keep=0)
traceln(' -- Number of Params: ', self.get_nb_params())
#TODO Move in MultigraphNN
def save_model(self, session, model_filename):
traceln("Saving Model")
save_path = self.saver.save(session, model_filename)
def restore_model(self, session, model_filename):
self.saver.restore(session, model_filename)
traceln("Model restored.")
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln(' -- Train',X.shape,EA.shape)
#traceln(' -- DropoutEdges',self.dropout_rate_edge)
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
#self.F: graph.F,
self.Aind: Aind,
self.y_input: graph.Y,
#self.dropout_p_H: self.dropout_rate_H,
self.dropout_p_node: self.dropout_rate_node,
self.dropout_p_attn: self.dropout_rate_attention,
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
self.Aind: Aind,
#self.F: graph.F,
self.y_input: graph.Y,
#self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_attn: 0.0,
#self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: | np.array(graph.Tind, dtype='int64') | numpy.array |
r"""<video width="100%" autoplay loop controls>
<source src="https://github.com/pkomiske/EnergyFlow/raw/images/CMS2011AJets_EventSpaceTriangulation.mp4"
type="video/mp4">
</video>
<br>
The Energy Mover's Distance (EMD), also known as the Earth Mover's
Distance, is a metric between particle collider events introduced in
[1902.02346](https://arxiv.org/abs/1902.02346). This submodule contains
convenient functions for computing EMDs between individual events and
collections of events. The core of the computation is done using the
[Python Optimal Transport (POT)](https://pot.readthedocs.io) library,
which must be installed in order to use this submodule.
From Eq. 1 in [1902.02346](https://arxiv.org/abs/1902.02346), the EMD between
two events is the minimum ''work'' required to rearrange one event $\mathcal E$
into the other $\mathcal E'$ by movements of energy $f_{ij}$ from particle $i$
in one event to particle $j$ in the other:
$$
\text{EMD}(\mathcal E,\mathcal E^\prime)=\min_{\{f_{ij}\}}\sum_{ij}f_{ij}\frac{
\theta_{ij}}{R} + \left|\sum_iE_i-\sum_jE^\prime_j\right|,\\
f_{ij}\ge 0, \quad \sum_jf_{ij}\le E_i, \quad \sum_if_{ij}\le E^\prime_j, \quad
\sum_{ij}f_{ij}=E_\text{min},
$$
where $E_i,E^\prime_j$ are the energies of the particles in the two events,
$\theta_{ij}$ is an angular distance between particles, and
$E_\text{min}=\min\left(\sum_iE_i,\,\sum_jE^\prime_j\right)$ is the smaller
of the two total energies. In a hadronic context, transverse momenta are used
instead of energies.
"""
from __future__ import absolute_import, division, print_function
import itertools
import multiprocessing
import sys
import time
import numpy as np
ot = True
try:
from ot.lp import emd_c, check_result
from scipy.spatial.distance import _distance_wrap # ot imports scipy anyway
except:
ot = False
from energyflow.utils import create_pool, p4s_from_ptyphims
__all__ = ['emd', 'emds']
# replace public functions with those issuing simple errors
if not ot:
def emd(*args, **kwargs):
raise NotImplementedError("emd currently requires module 'ot', which is unavailable")
def emds(*args, **kwargs):
raise NotImplementedError("emd currently requires module 'ot', which is unavailable")
# the actual functions for this module
if ot:
##################
# HELPER FUNCTIONS
##################
# parameter checks
def _check_params(norm, gdim, phi_col, measure, coords, empty_policy):
# check norm
if norm is None:
raise ValueError("'norm' cannot be None")
# check phi_col
if phi_col < 1:
raise ValueError("'phi_col' cannot be smaller than 1")
# check gdim
if gdim is not None:
if gdim < 1:
raise ValueError("'gdim' must be greater than or equal to 1")
if phi_col > gdim + 1:
raise ValueError("'phi_col' must be less than or equal to 'gdim'")
# check measure
if measure not in {'euclidean', 'spherical'}:
raise ValueError("'measure' must be one of 'euclidean', 'spherical'")
# check coords
if coords not in {'hadronic', 'cartesian'}:
raise ValueError("'coords' must be one of 'hadronic', 'cartesian'")
# check empty_policy
if not (isinstance(empty_policy, (int, float)) or empty_policy == 'error'):
raise ValueError("'empty_policy' must be a number or 'error'")
# process events for EMD calculation
two_pi = 2*np.pi
def _process_for_emd(event, norm, gdim, periodic_phi, phi_col,
mask, R, hadr2cart, euclidean, error_on_empty):
# ensure event is at least a 2d numpy array
event = np.atleast_2d(event) if gdim is None else np.atleast_2d(event)[:,:(gdim+1)]
# if we need to map hadronic coordinates to cartesian ones
if hadr2cart:
event = p4s_from_ptyphims(event)
# select the pts and coords
pts, coords = event[:,0], event[:,1:]
# norm vectors if spherical
if not euclidean:
# special case for three dimensions (most common), twice as fast
if coords.shape[1] == 3:
coords /= np.sqrt(coords[:,0]**2 + coords[:,1]**2 + coords[:,2]**2)[:,None]
else:
coords /= np.sqrt(np.sum(coords**2, axis=1))[:,None]
# handle periodic phi (only applicable if using euclidean)
elif periodic_phi:
if phi_col >= event.shape[1] - 1:
evgdim = str(event.shape[1] - 1)
raise ValueError("'phi_col' cannot be larger than the ground space "
'dimension, which is ' + evgdim + ' for one of the events')
coords[:,phi_col] %= two_pi
# handle masking out particles farther than R away from origin
if mask:
# ensure contiguous coords for scipy distance function
coords = np.ascontiguousarray(coords, dtype=np.double)
origin = np.zeros((1, coords.shape[1]))
# calculate distance from origin
rs = _cdist(origin, coords, euclidean, periodic_phi, phi_col)[0]
rmask = (rs <= R)
# detect when masking actually needs to occur
if not np.all(rmask):
pts, coords = pts[rmask], coords[rmask]
# check that we have at least one particle
if pts.size == 0:
if error_on_empty:
raise ValueError('empty event encountered, must have at least one particle')
else:
return (None, None)
# handle norming pts or adding extra zeros to event
if norm:
pts = pts/pts.sum()
elif norm is None:
pass
else:
coords = np.vstack((coords, np.zeros(coords.shape[1])))
pts = np.concatenate((pts, np.zeros(1)))
return (np.ascontiguousarray(pts, dtype=np.double),
np.ascontiguousarray(coords, dtype=np.double))
# faster than scipy's cdist function because we can avoid their checks
def _cdist(X, Y, euclidean, periodic_phi, phi_col):
if euclidean:
if periodic_phi:
# delta phis (taking into account periodicity)
                # values already guaranteed to be in [0, 2pi]
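                # e.g. phi values 0.1 and 6.2: |0.1 - 6.2| = 6.1, and
                # pi - |pi - 6.1| ~= 0.183, the short way around the circle
                # (matching 2*pi - 6.1)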
d_phis = np.pi - np.abs(np.pi - np.abs(X[:,phi_col,None] - Y[:,phi_col]))
# split out common case of having only one other dimension
if X.shape[1] == 2:
non_phi_col = 1 - phi_col
d_ys = X[:,non_phi_col,None] - Y[:,non_phi_col]
out = | np.sqrt(d_ys**2 + d_phis**2) | numpy.sqrt |
from unittest import TestCase
from tests.assertions import CustomAssertions
import scipy.sparse
import numpy as np
import tests.rabi as rabi
import floq
class TestSetBlock(TestCase):
def setUp(self):
self.dim_block = 5
self.n_block = 3
self.a, self.b, self.c, self.d, self.e, self.f, self.g, self.h, self.i \
= [j*np.ones([self.dim_block, self.dim_block]) for j in range(9)]
matrix = np.bmat([[self.a, self.b, self.c],
[self.d, self.e, self.f],
[self.g, self.h, self.i]])
self.original = np.array(matrix)
total_size = self.dim_block*self.n_block
self.copy = np.zeros([total_size,total_size])
def test_set(self):
# Try to recreate self.original with the new function
floq.evolution._add_block(self.a, self.copy, self.dim_block, self.n_block, 0, 0)
floq.evolution._add_block(self.b, self.copy, self.dim_block, self.n_block, 0, 1)
floq.evolution._add_block(self.c, self.copy, self.dim_block, self.n_block, 0, 2)
floq.evolution._add_block(self.d, self.copy, self.dim_block, self.n_block, 1, 0)
floq.evolution._add_block(self.e, self.copy, self.dim_block, self.n_block, 1, 1)
floq.evolution._add_block(self.f, self.copy, self.dim_block, self.n_block, 1, 2)
floq.evolution._add_block(self.g, self.copy, self.dim_block, self.n_block, 2, 0)
floq.evolution._add_block(self.h, self.copy, self.dim_block, self.n_block, 2, 1)
floq.evolution._add_block(self.i, self.copy, self.dim_block, self.n_block, 2, 2)
self.assertTrue(np.array_equal(self.copy,self.original))
class TestAssembleK(CustomAssertions):
def setUp(self):
self.n_zones = 5
self.frequency = 1
dim=2
a = -1.*np.ones([dim, dim])
b = np.zeros([dim, dim])
c = np.ones([dim, dim])
z = np.zeros([dim, dim])
i = np.identity(dim)
self.goalk = np.array(
np.bmat(
[[b-2*i, a, z, z, z],
[c, b-i, a, z, z],
[z, c, b, a, z],
[z, z, c, b+i, a],
[z, z, z, c, b+2*i]]))
self.hf = floq.system._canonicalise_operator(np.array([a, b, c]))
def test_dense(self):
builtk = floq.evolution.assemble_k(self.hf, self.n_zones, self.frequency)
self.assertArrayEqual(builtk, self.goalk)
def test_sparse(self):
builtk = floq.evolution.assemble_k_sparse(self.hf, self.n_zones,
self.frequency)
self.assertTrue(scipy.sparse.issparse(builtk))
self.assertArrayEqual(builtk.toarray(), self.goalk)
class TestDenseToSparse(CustomAssertions):
def test_conversion(self):
goal = floq.types.ColumnSparseMatrix(np.array([1, 2]),
np.array([1, 0, 1]),
np.array([2, 1, 3]))
built = floq.evolution._dense_to_sparse(np.arange(4).reshape(2, 2))
self.assertColumnSparseMatrixEqual(built, goal)
class TestAssembledK(CustomAssertions):
def setUp(self):
self.n_zones = 5
dim=2
a = -1.*np.ones([dim, dim])
b = np.zeros([dim, dim])
c = | np.ones([dim, dim]) | numpy.ones |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prudence preferred
"""
import os
import io
import glob
import errno
import copy
import json
import time
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.integrate as intgt
import scipy.fftpack as fft
import scipy.special as spl
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
import multiprocessing as mp
import sys
sys.path.append('/Users/marketing/Desktop/HSG-turbo/')
import hsganalysis.QWPProcessing as qwp
from hsganalysis.QWPProcessing.extractMatrices import makeT,saveT
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# Ideally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
class sbarr(object):
SBNUM = 0
CENFREQ = 1
CENFREQERR = 2
AREA = 3
AREAERR = 4
WIDTH = 5
WIDTHERR = 6
####################
# Objects
####################
class CCD(object):
def __init__(self, fname, spectrometer_offset=None):
"""
This will read the appropriate file and make a basic CCD object. Fancier
things will be handled with the sub classes.
Creates:
self.parameters = Dictionary holding all of the information from the
data file, which comes from the JSON encoded header in the data
file
self.description = string that is the text box from data taking GUI
self.raw_data = raw data output by measurement software, wavelength vs.
data, errors. There may be text for some of the entries
corresponding to text used for Origin imports, but they
should appear as np.nan
self.ccd_data = semi-processed 1600 x 3 array of photon energy vs. data with standard error of mean at that pixel
calculated by taking multiple images. Standard error is calculated from
the data collection software
Most subclasses should make a self.proc_data, which will do whatever
processing is required to the ccd_data, such as normalizing, taking ratios,
etc.
:param fname: file name where the data is saved
:type fname: str
:param spectrometer_offset: if the spectrometer won't go where it's told, use this to correct the wavelengths (nm)
:type spectrometer_offset: float
"""
self.fname = fname
# Checking restrictions from Windows path length limits. Check if you can
# open the file:
try:
with open(fname) as f: pass
except FileNotFoundError:
# Couldn't find the file. Could be you passed the wrong one, but I'm
# finding with a large number of subfolders for polarimetry stuff,
# you end up exceeding Windows' filelength limit.
            # Haven't tested on Mac or UNC mounted drives (e.g. \\128.x.x.x\Sherwin\)
fname = r"\\?\\" + os.path.abspath(fname)
# Read in the JSON-formatted parameter string.
# The lines are all prepended by '#' for easy numpy importing
# so loop over all those lines
with open(fname, 'r') as f:
param_str = ''
line = f.readline()
while line[0] == '#':
### changed 09/17/18
# This line assumed there was a single '#'
# param_str += line[1:]
                # while this one handles several (because I found old files
# which had '## <text>...'
param_str += line.replace("#", "")
line = f.readline()
# Parse the JSON string
try:
self.parameters = json.loads(param_str)
except json.JSONDecodeError:
# error from _really_ old data where comments were dumped after a
# single-line json dumps
self.parameters=json.loads(param_str.splitlines()[0])
# Spec[trometer] steps are set to define the same physical data, but taken at
# different spectrometer center wavelengths. This value is used later
# for stitching these scans together
try:
self.parameters["spec_step"] = int(self.parameters["spec_step"])
except (ValueError, KeyError):
            # If there isn't a spec_step parameter, default to 0
self.parameters["spec_step"] = 0
# Slice through 3 to get rid of comments/origin info.
        # It would likely be better to check np.isnan() and slice out those nans.
        # I used flipud so that the x-axis is an increasing function of frequency
self.raw_data = np.flipud(np.genfromtxt(fname, comments='#', delimiter=',')[3:])
        # The camera chip is 1600 pixels wide. This line was redundant with the [3:]
# slice above and served to make sure there weren't extra stray bad lines
# hanging around.
#
        # This should also be updated some day to compensate for any horizontal binning
# on the chip, or masking out points that are bad (cosmic ray making it
# through processing, room lights or monitor lines interfering with signal)
self.ccd_data = np.array(self.raw_data[:1600, :])
# Check to see if the spectrometer offset is set. This isn't specified
# during data collection. This is a value that can be appended
# when processing if it's realized the data is offset.
# This allows the offset to be specified and kept with the data file itself,
# instead of trying to do it in individual processing scripts
#
# It's allowed as a kwarg parameter in this script for trying to determine
# what the correct offset should be
if spectrometer_offset is not None or "offset" in self.parameters:
try:
self.ccd_data[:, 0] += float(self.parameters["offset"])
except:
self.ccd_data[:, 0] += spectrometer_offset
# Convert from nm to eV
# self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
self.ccd_data[:, 0] = photon_converter["nm"]["eV"](self.ccd_data[:, 0])
class Photoluminescence(CCD):
def __init__(self, fname):
"""
This object handles PL-type data. The only distinction from the parent class
is that the CCD data gets normalized to the exposure time to make different
exposures directly comparable.
creates:
self.proc_data = self.ccd_data divided by the exposure time
units: PL counts / second
:param fname: name of the file
:type fname: str
"""
super(Photoluminescence, self).__init__(fname)
# Create a copy of the array , and then normalize the signal and the errors
# by the exposure time
self.proc_data = np.array(self.ccd_data)
self.proc_data[:, 1] = self.proc_data[:, 1] / self.parameters['exposure']
self.proc_data[:, 2] = self.proc_data[:, 2] / self.parameters['exposure']
class Absorbance(CCD):
def __init__(self, fname):
"""
There are several ways Absorbance data can be loaded
You could try to load the abs data output from data collection directly,
which has the wavelength, raw, blank and actual absorbance data itself.
This is best way to do it.
Alternatively, you could want to load the raw transmission/reference
data, ignoring (or maybe not even having) the abs calculated
from the data collection software. If you want to do it this way,
you should pass fname as a list where the first element is the
file name for the reference data, and the second is the absorbance data
        At first, it didn't really seem to make sense to let you pass just the
        raw reference or raw abs data.
Creates:
self.ref_data = np array of the reference,
freq (eV) vs. reference (counts)
self.raw_data = np.array of the raw absorption spectrum,
freq (eV) vs. reference (counts)
self.proc_data = np.array of the absorption spectrum
freq (eV) vs. "absorbance" (dB)
Note, the error bars for this data haven't been defined.
:param fname: either an absorbance filename, or a length 2 list of filenames
:type fname: str
:return: None
"""
if "abs_" in fname:
super(Absorbance, self).__init__(fname)
# Separate into the separate data sets
# The raw counts of the reference data
self.ref_data = np.array(self.ccd_data[:, [0, 1]])
# Raw counts of the sample
self.raw_data = np.array(self.ccd_data[:, [0, 2]])
# The calculated absorbance data (-10*log10(raw/ref))
self.proc_data = np.array(self.ccd_data[:, [0, 3]]) # Already in dB's
else:
# Should be here if you pass the reference/trans filenames
try:
super(Absorbance, self).__init__(fname[0])
self.ref_data = np.array(self.ccd_data)
super(Absorbance, self).__init__(fname[1])
self.raw_data = np.array(self.ccd_data)
except ValueError:
# ValueError gets thrown when importing older data
# which had more headers than data columns. Enforce
# only loading first two columns to avoid numpy trying
# to parse all of the data
# See CCD.__init__ for what's going on.
self.ref_data = np.flipud(np.genfromtxt(fname[0], comments='#',
delimiter=',', usecols=(0, 1)))
self.ref_data = np.array(self.ref_data[:1600, :])
self.ref_data[:, 0] = 1239.84 / self.ref_data[:, 0]
self.raw_data = np.flipud(np.genfromtxt(fname[1], comments='#',
delimiter=',', usecols=(0, 1)))
self.raw_data = np.array(self.raw_data[:1600, :])
self.raw_data[:, 0] = 1239.84 / self.raw_data[:, 0]
except Exception as e:
print("Exception opening absorbance data,", e)
# Calculate the absorbance from the raw camera counts.
self.proc_data = np.empty_like(self.ref_data)
self.proc_data[:, 0] = self.ref_data[:, 0]
self.proc_data[:, 1] = -10*np.log10(self.raw_data[:, 1] / self.ref_data[:,
1])
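        # e.g. a sample transmitting half as much light as the reference gives
        # -10*log10(0.5) ~= 3.01 dB of absorbance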
def abs_per_QW(self, qw_number):
"""
:param qw_number: number of quantum wells in the sample.
:type qw_number: int
:return: None
"""
"""
This method turns the absorption to the absorbance per quantum well. Is
that how this data should be reported?
Also, I'm not sure if columns 1 and 2 are correct.
"""
temp_abs = -np.log(self.proc_data[:, 1] / self.proc_data[:, 2]) / qw_number
self.proc_data = np.hstack((self.proc_data, temp_abs))
def fft_smooth(self, cutoff, inspectPlots=False):
"""
This function removes the Fabry-Perot that affects the absorption data
creates:
self.clean = np.array of the Fourier-filtered absorption data, freq (eV) vs. absorbance (dB!)
self.parameters['fourier cutoff'] = the low pass cutoff frequency, in eV**(-1)
:param cutoff: Fourier frequency of the cut off for the low pass filter
:type cutoff: int or float
:param inspectPlots: Do you want to see the results?
:type inspectPlots: bool
:return: None
"""
# self.fixed = -np.log10(abs(self.raw_data[:, 1]) / abs(self.ref_data[:, 1]))
# self.fixed = np.nan_to_num(self.proc_data[:, 1])
# self.fixed = np.column_stack((self.raw_data[:, 0], self.fixed))
self.parameters['fourier cutoff'] = cutoff
self.clean = low_pass_filter(self.proc_data[:, 0], self.proc_data[:, 1], cutoff, inspectPlots)
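    # A minimal sketch of what `low_pass_filter` is assumed to do (the actual
    # helper is defined elsewhere in this package): FFT the signal, zero every
    # component above the cutoff frequency, and transform back.
    #
    #   def low_pass_filter(x, y, cutoff, inspectPlots=False):
    #       freqs = fft.fftfreq(len(y), d=np.mean(np.diff(x)))
    #       Y = fft.fft(y)
    #       Y[np.abs(freqs) > cutoff] = 0
    #       return np.column_stack((x, np.real(fft.ifft(Y))))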
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This bad boy saves the absorption spectrum that has been manipulated.
Saves 100 lines of comments.
:param file_name: The base name of the file to be saved
:type file_name: str
:param folder_str: The name of the folder where the file will be saved
:type folder_str: str
:param marker: A further label that might be the series tag or something
:type marker: str
:param index: If multiple files are being saved with the same name, include an integer to append to the end of the file
:type index: int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
self.save_name = spectra_fname
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing into Origin is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
spectra_fname = 'clean ' + spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.clean, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
# class LaserLineCCD(HighSidebandCCD):
# """
# Class for use when doing alinging/testing by sending the laser
# directly into the CCD. Modifies how "sidebands" and guess and fit,
# simply looking at the max signal.
# """
# def guess_sidebands(self, cutoff=8, verbose=False, plot=False):
# pass
class NeonNoiseAnalysis(CCD):
"""
This class is used to make handling neon calibration lines easier. It's not great.
"""
def __init__(self, fname, spectrometer_offset=None):
# print 'opening', fname
super(NeonNoiseAnalysis, self).__init__(fname, spectrometer_offset=spectrometer_offset)
self.addenda = self.parameters['addenda']
self.subtrahenda = self.parameters['subtrahenda']
self.noise_and_signal()
self.process_stuff()
def noise_and_signal(self):
"""
This bad boy calculates the standard deviation of the space between the
neon lines.
The noise regions are, in nm:
high: 784-792
low1: 795-806
low2: 815-823
low3: 831-834
the peaks are located at, in nm:
#1, weak: 793.6
#2, medium: 794.3
#3, medium: 808.2
#4, weak: 825.9
#5, strong: 830.0
"""
print('\n\n')
self.ccd_data = np.flipud(self.ccd_data)
# self.high_noise_region = np.array(self.ccd_data[30:230, :])
self.high_noise_region = np.array(self.ccd_data[80:180, :]) # for dark current measurements
self.low_noise_region1 = np.array(self.ccd_data[380:700, :])
self.low_noise_region2 = np.array(self.ccd_data[950:1200, :])
self.low_noise_region3 = np.array(self.ccd_data[1446:1546, :])
# self.high_noise = np.std(self.high_noise_region[:, 1])
self.high_noise_std = np.std(self.high_noise_region[:, 1])
self.high_noise_sig = np.mean(self.high_noise_region[:, 1])
self.low_noise1 = np.std(self.low_noise_region1[:, 1])
self.low_noise2 = np.std(self.low_noise_region2[:, 1])
self.low_noise_std = np.std(self.low_noise_region2[:, 1])
self.low_noise_sig = np.mean(self.low_noise_region2[:, 1])
self.low_noise3 = np.std(self.low_noise_region3[:, 1])
# self.noise_list = [self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3]
self.peak1 = np.array(self.ccd_data[303:323, :])
self.peak2 = np.array(self.ccd_data[319:339, :])
self.peak3 = np.array(self.ccd_data[736:746, :])
self.peak4 = np.array(self.ccd_data[1268:1288, :])
self.peak5 = np.array(self.ccd_data[1381:1421, :])
temp_max = np.argmax(self.peak1[:, 1])
self.signal1 = np.sum(self.peak1[temp_max - 1:temp_max + 2, 1])
self.error1 = np.sqrt(np.sum(self.peak1[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak2[:, 1])
self.signal2 = np.sum(self.peak2[temp_max - 1:temp_max + 2, 1])
self.error2 = np.sqrt(np.sum(self.peak2[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak3[:, 1])
self.signal3 = np.sum(self.peak3[temp_max - 1:temp_max + 2, 1])
self.error3 = np.sqrt(np.sum(self.peak3[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak4[:, 1])
self.signal4 = np.sum(self.peak4[temp_max - 1:temp_max + 2, 1])
self.error4 = np.sqrt(np.sum(self.peak4[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak5[:, 1])
self.signal5 = np.sum(self.peak5[temp_max - 1:temp_max + 2, 1])
self.error5 = np.sqrt(np.sum(self.peak5[temp_max - 1:temp_max + 2, 2] ** 2))
self.signal_list = [self.signal1, self.signal2, self.signal3, self.signal4, self.signal5]
self.error_list = [self.error1, self.error2, self.error3, self.error4, self.error5]
print("Signal list:", self.signal_list)
self.ccd_data = np.flipud(self.ccd_data)
def process_stuff(self):
"""
This one puts high_noise, low_noise1, signal2, and error2 in a nice horizontal array
"""
# self.results = np.array([self.high_noise, self.low_noise1, self.signal5, self.error5])
# average = np.mean([self.low_noise1, self.low_noise2, self.low_noise3])
# self.results = np.array([self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3, self.high_noise/average])
self.results = np.array([self.high_noise_sig, self.high_noise_std, self.low_noise_sig, self.low_noise_std])
def collect_noise(neon_list, param_name, folder_name, file_name, name='Signal'):
"""
This function acts like save parameter sweep.
param_name = string that we're gonna save!
"""
# param_array = None
for elem in neon_list:
print("pname: {}".format(elem.parameters[param_name]))
print("results:", elem.results)
temp = np.insert(elem.results, 0, elem.parameters[param_name])
try:
param_array = np.row_stack((param_array, temp))
except UnboundLocalError:
param_array = np.array(temp)
if len(param_array.shape) == 1:
print("I don't think you want this file")
return
# append the relative peak error
print('\n', param_array, '\n')
param_array = np.column_stack((param_array, param_array[:, 4] / param_array[:, 3]))
# append the snr
param_array = np.column_stack((param_array, param_array[:, 3] / param_array[:, 2]))
try:
param_array = param_array[param_array[:, 0].argsort()]
except:
print("param_array shape", param_array.shape)
raise
try:
os.mkdir(folder_name)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
file_name = file_name + '.txt'
origin_import1 = param_name + ",Noise,Noise,Signal,error,rel peak error,peak signal-to-noise"
# origin_import1 = param_name + ",Noise,Noise,Noise,Noise,Ratio"
origin_import2 = ",counts,counts,counts,counts,,"
# origin_import2 = ",counts,counts,counts,,"
origin_import3 = ",High noise region,Low noise region,{},{} error,{} rel error, {}".format(name, name, name, name)
# origin_import3 = ",High noise region,Low noise region 1,Low noise region 2,Low noise region 3,High/low"
header_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# print "Spec header: ", spec_header
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_name, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_name, file_name)))
class HighSidebandCCD(CCD):
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is off by,
should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
        self.parameters = string with all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
        self.proc_data = processed data that has been converted to frequency vs counts/pulse
self.dark_stdev = this is not currently handled appropriately
self.addenda = the list of things that have been added to the file, in
form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
        :param hsg_thing: file name for the file to be opened. OR the actual hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
self.parameters = parameter_dict.copy() # Probably shouldn't shoehorn this in this way
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
            # This data won't have an error column, so attach a column of ones
self.ccd_data = np.column_stack((self.ccd_data, np.ones_like(self.ccd_data[:,1])))
self.ccd_data = np.flipud(self.ccd_data) # Because turning into eV switches direction
self.fname = "Live Data"
else:
raise Exception("I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)
))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV), signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84 / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get("nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 * float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get("fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get("nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(self.parameters["pulseEnergies"]["std"])
except: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get("fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD object,
then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'], other.parameters['center_lambda']):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception('Source: Spectrum.__add__:\nThese are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other # Need to choose a name
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception('Source: Spectrum.__sub__:\nThese are not from the same grating settings')
return ret
def __repr__(self):
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(os.path.basename(self.fname),**self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
This simple method will simply return a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq: thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
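        # Illustrative numbers (assumed, not from the data): with
        # nir_freq = 1.519 eV and thz_freq = 0.0025 eV, a test frequency of
        # 1.544 eV gives (1.544 - 1.519) / 0.0025 = 10, i.e. roughly the
        # 10th sideband.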
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = | np.array(self.proc_data[:, 0]) | numpy.array |
import numpy as np
import tensorly as tl
tl.set_backend('pytorch')
def count_cp4_parameters(tensor_shape,
rank = 8):
cout, cin, kh, kw = tensor_shape
cp4_count = rank * (cin + kh + kw + cout)
return cp4_count
def count_cp3_parameters(tensor_shape,
rank = 8):
cout, cin, kh, kw = tensor_shape
cp3_count = rank * (cin + kh*kw + cout)
return cp3_count
def count_tucker2_parameters(tensor_shape,
ranks = [8,8]):
cout, cin, kh, kw = tensor_shape
    if not isinstance(ranks, (list, tuple)):
        ranks = [ranks, ranks]
tucker2_count = ranks[-2]*cin + np.prod(ranks[-2:])*kh*kw + ranks[-1]*cout
return np.array(tucker2_count)
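# Worked example (illustrative): a conv kernel of shape (cout=64, cin=32,
# kh=3, kw=3) holds 64*32*3*3 = 18432 parameters. With rank 8,
# count_cp3_parameters gives 8*(32 + 3*3 + 64) = 840, while
# count_tucker2_parameters with ranks=[8, 8] gives
# 8*32 + 8*8*3*3 + 8*64 = 1344.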
def count_parameters(tensor_shape,
rank = None,
key = 'cp3'):
cout, cin, kh, kw = tensor_shape
if key == 'cp3':
params_count = count_cp3_parameters(tensor_shape, rank=rank)
elif key == 'tucker2':
params_count = count_tucker2_parameters(tensor_shape, ranks=rank)
return params_count
def estimate_rank_for_compression_rate(tensor_shape,
rate = 2,
key = 'tucker2'):
initial_count = | np.prod(tensor_shape) | numpy.prod |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update a simple plot as rapidly as possible to measure speed.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from collections import deque
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
from time import perf_counter
app = pg.mkQApp("Plot Speed Test")
p = pg.plot()
p.setWindowTitle('pyqtgraph example: PlotSpeedTest')
p.setRange(QtCore.QRectF(0, -10, 5000, 20))
p.setLabel('bottom', 'Index', units='B')
curve = p.plot()
data = | np.random.normal(size=(50, 5000)) | numpy.random.normal |
"""Class for calibrating the color-based red-sequence model.
"""
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import os
import numpy as np
import fitsio
import time
from scipy.optimize import least_squares
from ..configuration import Configuration
from ..fitters import MedZFitter, RedSequenceFitter, RedSequenceOffDiagonalFitter, CorrectionFitter
from ..redsequence import RedSequenceColorPar
from ..color_background import ColorBackground
from ..galaxy import GalaxyCatalog
from ..catalog import Catalog, Entry
from ..zred_color import ZredColor
from ..utilities import make_nodes, CubicSpline, interpol
class RedSequenceCalibrator(object):
"""
Class for calibrating the color-based red-sequence model.
Requires an input galfile that has the following fields:
z: host cluster redshift
pcol: probability of membership using color/luminosity
p: probability of membership using color/luminosity/radial filter
refmag: total magnitude in the reference band
mag: magnitude array
mag_err: magnitude error array
"""
def __init__(self, conf, galfile):
"""
Instantiate a RedSequenceCalibrator.
Parameters
----------
conf: `str` or `redmapper.Configuration`
Configuration yaml file or configuration object
galfile: `str`
Galaxy file with the required fields
"""
if not isinstance(conf, Configuration):
self.config = Configuration(conf)
else:
self.config = conf
self._galfile = galfile
def run(self, doRaise=True):
"""
Run the red-sequence calibration.
Parameters
----------
doRaise: `bool`, optional
Raise an error if background cannot be computed for any galaxies
Default is True. Can be set to False for certain testing.
"""
gals = GalaxyCatalog.from_galfile(self._galfile)
if self.config.calib_use_pcol:
use, = np.where((gals.z > self.config.zrange[0]) &
(gals.z < self.config.zrange[1]) &
(gals.pcol > self.config.calib_pcut))
else:
use, = np.where((gals.z > self.config.zrange[0]) &
(gals.z < self.config.zrange[1]) &
(gals.p > self.config.calib_pcut))
if use.size == 0:
raise RuntimeError("No good galaxies in %s!" % (self._galfile))
gals = gals[use]
nmag = self.config.nmag
ncol = nmag - 1
# Reference mag nodes for pivot
pivotnodes = make_nodes(self.config.zrange, self.config.calib_pivotmag_nodesize)
# Covmat nodes
covmatnodes = make_nodes(self.config.zrange, self.config.calib_covmat_nodesize)
# correction nodes
corrnodes = make_nodes(self.config.zrange, self.config.calib_corr_nodesize)
# correction slope nodes
corrslopenodes = make_nodes(self.config.zrange, self.config.calib_corr_slope_nodesize)
# volume factor (hard coded)
volnodes = make_nodes(self.config.zrange, 0.01)
# Start building the par dtype
dtype = [('pivotmag_z', 'f4', pivotnodes.size),
('pivotmag', 'f4', pivotnodes.size),
('minrefmag', 'f4', pivotnodes.size),
('maxrefmag', 'f4', pivotnodes.size),
('medcol', 'f4', (pivotnodes.size, ncol)),
('medcol_width', 'f4', (pivotnodes.size, ncol)),
('covmat_z', 'f4', covmatnodes.size),
('sigma', 'f4', (ncol, ncol, covmatnodes.size)),
('covmat_amp', 'f4', (ncol, ncol, covmatnodes.size)),
('covmat_slope', 'f4', (ncol, ncol, covmatnodes.size)),
('corr_z', 'f4', corrnodes.size),
('corr', 'f4', corrnodes.size),
('corr_slope_z', 'f4', corrslopenodes.size),
('corr_slope', 'f4', corrslopenodes.size),
('corr_r', 'f4', corrslopenodes.size),
('corr2', 'f4', corrnodes.size),
('corr2_slope', 'f4', corrslopenodes.size),
('corr2_r', 'f4', corrslopenodes.size),
('volume_factor_z', 'f4', volnodes.size),
('volume_factor', 'f4', volnodes.size)]
# And for each color, make the nodes
node_dict = {}
self.ztag = [None] * ncol
self.ctag = [None] * ncol
self.zstag = [None] * ncol
self.stag = [None] * ncol
for j in xrange(ncol):
self.ztag[j] = 'z%02d' % (j)
self.ctag[j] = 'c%02d' % (j)
self.zstag[j] = 'zs%02d' % (j)
self.stag[j] = 'slope%02d' % (j)
node_dict[self.ztag[j]] = make_nodes(self.config.zrange, self.config.calib_color_nodesizes[j],
maxnode=self.config.calib_color_maxnodes[j])
node_dict[self.zstag[j]] = make_nodes(self.config.zrange, self.config.calib_slope_nodesizes[j],
maxnode=self.config.calib_color_maxnodes[j])
dtype.extend([(self.ztag[j], 'f4', node_dict[self.ztag[j]].size),
(self.ctag[j], 'f4', node_dict[self.ztag[j]].size),
(self.zstag[j], 'f4', node_dict[self.zstag[j]].size),
(self.stag[j], 'f4', node_dict[self.zstag[j]].size)])
# Make the pars ... and fill them with the defaults
self.pars = Entry(np.zeros(1, dtype=dtype))
self.pars.pivotmag_z = pivotnodes
self.pars.covmat_z = covmatnodes
self.pars.corr_z = corrnodes
self.pars.corr_slope_z = corrslopenodes
self.pars.volume_factor_z = volnodes
for j in xrange(ncol):
self.pars._ndarray[self.ztag[j]] = node_dict[self.ztag[j]]
self.pars._ndarray[self.zstag[j]] = node_dict[self.zstag[j]]
# And a special subset of color galaxies
if self.config.calib_use_pcol:
coluse, = np.where(gals.pcol > self.config.calib_color_pcut)
else:
coluse, = np.where(gals.p > self.config.calib_color_pcut)
colgals = gals[coluse]
# And a placeholder zredstr which allows us to do stuff
self.zredstr = RedSequenceColorPar(None, config=self.config)
# And read the color background
self.bkg = ColorBackground(self.config.bkgfile_color)
# And prepare for luptitude corrections
if self.config.b[0] == 0.0:
self.do_lupcorr = False
else:
self.do_lupcorr = True
self.bnmgy = self.config.b * 1e9
self.lupzp = 22.5
# Compute pivotmags
self._calc_pivotmags(colgals)
# Compute median colors
self._calc_medcols(colgals)
# Compute diagonal parameters
self._calc_diagonal_pars(gals, doRaise=doRaise)
# Compute off-diagonal parameters
self._calc_offdiagonal_pars(gals, doRaise=doRaise)
# Compute volume factor
self._calc_volume_factor(self.config.zrange[1])
# Write out the parameter file
self.save_pars(self.config.parfile, clobber=False)
# Compute zreds without corrections
# Later will want this parallelized, I think
self._calc_zreds(gals, do_correction=False)
# Compute correction (mode1)
self._calc_corrections(gals)
# Compute correction (mode2)
self._calc_corrections(gals, mode2=True)
# And re-save the parameter file
self.save_pars(self.config.parfile, clobber=True)
# Recompute zreds with corrections
# Later will want this parallelized, I think
self._calc_zreds(gals, do_correction=True)
# And want to save galaxies and zreds
        zredfile = os.path.join(self.config.outpath, os.path.basename(self._galfile).replace('.fit', '_zreds.fit'))
gals.to_fits_file(zredfile)
# Make diagnostic plots
self._make_diagnostic_plots(gals)
def _compute_startvals(self, nodes, z, val, xval=None, err=None, median=False, fit=False, mincomp=3):
"""
Compute the starting fit values using a simple algorithm.
Must select one (and only one) of median=True (median fit) or
fit=True (weighted mean fit).
Parameters
----------
nodes: `np.array`
Float array of redshift nodes
z: `np.array`
Float array of redshifts
val: `np.array`
Float array of values to fit (e.g. refmag, color)
xval: `np.array`, optional
X-axis value for color-magnitude relation if fitting slope.
Usually refmag.
Default is None, which means not fitting a slope.
err: `np.array`, optional
Float array of error on val. Not used if fitting median.
Default is None.
median: `bool`, optional
Perform median fit. Default is False.
fit: `bool`, optional
Perform weighted mean fit. Default is False.
"""
def _linfunc(p, x, y):
return (p[1] + p[0] * x) - y
if (not median and not fit) or (median and fit):
raise RuntimeError("Must select one and only one of median and fit")
if median:
mvals = np.zeros(nodes.size)
scvals = np.zeros(nodes.size)
else:
cvals = np.zeros(nodes.size)
svals = np.zeros(nodes.size)
if err is not None:
if err.size != val.size:
raise ValueError("val and err must be the same length")
# default all to 0.1
evals = np.zeros(nodes.size) + 0.1
else:
evals = None
for i in xrange(nodes.size):
if i == 0:
zlo = nodes[0]
else:
zlo = (nodes[i - 1] + nodes[i]) / 2.
if i == nodes.size - 1:
zhi = nodes[i]
else:
zhi = (nodes[i] + nodes[i + 1]) / 2.
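            # e.g. nodes [0.1, 0.2, 0.3] give bin edges [0.1, 0.15],
            # [0.15, 0.25], [0.25, 0.3]: each node collects the redshifts
            # closest to it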
u, = np.where((z > zlo) & (z < zhi))
if u.size < mincomp:
if i > 0:
if median:
mvals[i] = mvals[i - 1]
scvals[i] = scvals[i - 1]
else:
cvals[i] = cvals[i - 1]
svals[i] = svals[i - 1]
if err is not None:
evals[i] = evals[i - 1]
else:
if median:
mvals[i] = np.median(val[u])
scvals[i] = np.median(np.abs(val[u] - mvals[i]))
else:
                    res = least_squares(_linfunc, [0.0, 0.0], loss='soft_l1', args=(xval[u], val[u]))
                    cvals[i] = res.x[1]
                    svals[i] = np.clip(res.x[0], None, 0.0)
if err is not None:
evals[i] = np.median(err[u])
if median:
return mvals, scvals
else:
return cvals, svals, evals
def _compute_single_lupcorr(self, j, cvals, svals, gals, dmags, mags, lups, mind, sign):
"""
Compute the luptitude correction for a single color
Parameters
----------
j: `int`
Color index
cvals: `np.array`
Float array of spline values for color at pivotmag
svals: `np.array`
Float array of slope values
gals: `redmapper.GalaxyCatalog`
Galaxy catalog being fit
dmags: `np.array`
Float array of refmag - pivotmag
mags: `np.array`
2d Float array of true (model) magnitudes
lups: `np.array`
2d Float array of true (model) luptitudes
mind: `int`
magnitude index, currently being worked on.
sign: `int`, -1 or 1
Sign of color; -1 if band is redder than ref_ind,
+1 if band is bluer than ref_ind
Returns
-------
lupcorr: `np.array`
Float array of luptitude color corrections
"""
spl = CubicSpline(self.pars._ndarray[self.ztag[j]], cvals)
cv = spl(gals.z)
spl = CubicSpline(self.pars._ndarray[self.zstag[j]], svals)
sv = spl(gals.z)
mags[:, mind] = mags[:, mind + sign] + sign * (cv + sv * dmags)
flux = 10.**((mags[:, mind] - self.lupzp) / (-2.5))
lups[:, mind] = 2.5 * np.log10(1.0 / self.config.b[mind]) - np.arcsinh(0.5 * flux / self.bnmgy[mind]) / (0.4 * np.log(10.0))
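        # "Luptitudes" are asinh magnitudes (Lupton, Gunn & Szalay 1999);
        # unlike log magnitudes they remain well defined at zero or
        # negative flux.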
magcol = mags[:, j] - mags[:, j + 1]
lupcol = lups[:, j] - lups[:, j + 1]
lupcorr = lupcol - magcol
return lupcorr
def _calc_pivotmags(self, gals):
"""
Calculate the pivot magnitude parameters.
These are put into self.pars.pivotmag, self.pars.maxrefmag, and
self.pars.minrefmag
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
"""
self.config.logger.info("Calculating pivot magnitudes...")
# With binning, approximate the positions for starting the fit
        pivmags, _ = self._compute_startvals(self.pars.pivotmag_z, gals.z, gals.refmag, median=True)
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, gals.refmag)
pivmags = medfitter.fit(pivmags)
self.pars.pivotmag = pivmags
# and min and max...
self.pars.minrefmag = self.zredstr.mstar(self.pars.pivotmag_z) - 2.5 * np.log10(30.0)
lval_min = np.clip(self.config.lval_reference - 0.1, 0.001, None)
self.pars.maxrefmag = self.zredstr.mstar(self.pars.pivotmag_z) - 2.5 * np.log10(lval_min)
def _calc_medcols(self, gals):
"""
Calculate the median color spline parameters.
Sets self.pars.medcol, self.pars.medcol_width
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
"""
self.config.logger.info("Calculating median colors...")
ncol = self.config.nmag - 1
galcolor = gals.galcol
for j in xrange(ncol):
col = galcolor[:, j]
# get the start values
mvals, scvals = self._compute_startvals(self.pars.pivotmag_z, gals.z, col, median=True)
# compute the median
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, col)
mvals = medfitter.fit(mvals)
# and the scatter
spl = CubicSpline(self.pars.pivotmag_z, mvals)
med = spl(gals.z)
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, np.abs(col - med))
scvals = medfitter.fit(scvals)
self.pars.medcol[:, j] = mvals
self.pars.medcol_width[:, j] = 1.4826 * scvals
def _calc_diagonal_pars(self, gals, doRaise=True):
"""
Calculate the model parameters and diagonal elements of the covariance
matrix (one color at a time).
Sets self.pars.sigma, self.pars.covmat_amp, self.pars.cXX, self.pars.slopeXX
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
doRaise: `bool`, optional
Raise if there's a problem with the background? Default is True.
"""
# The main routine to compute the red sequence on the diagonal
ncol = self.config.nmag - 1
galcolor = gals.galcol
galcolor_err = gals.galcol_err
# compute the pivot mags
spl = CubicSpline(self.pars.pivotmag_z, self.pars.pivotmag)
pivotmags = spl(gals.z)
# And set the right probabilities
if self.config.calib_use_pcol:
probs = gals.pcol
else:
probs = gals.p
# Figure out the order of the colors for luptitude corrections
mags = np.zeros((gals.size, self.config.nmag))
if self.do_lupcorr:
col_indices = np.zeros(ncol, dtype=np.int32)
sign_indices = np.zeros(ncol, dtype=np.int32)
mind_indices = np.zeros(ncol, dtype=np.int32)
c=0
for j in xrange(self.config.ref_ind, self.config.nmag):
col_indices[c] = j - 1
sign_indices[c] = -1
mind_indices[c] = j
c += 1
for j in xrange(self.config.ref_ind - 2, -1, -1):
col_indices[c] = j
sign_indices[c] = 1
mind_indices[c] = j
c += 1
lups = np.zeros_like(mags)
mags[:, self.config.ref_ind] = gals.mag[:, self.config.ref_ind]
flux = 10.**((mags[:, self.config.ref_ind] - self.lupzp) / (-2.5))
lups[:, self.config.ref_ind] = 2.5 * | np.log10(1.0 / self.config.b[self.config.ref_ind]) | numpy.log10 |
from math import pi, sqrt
from scipy.special import dawsn
import numpy as np
def is_PD(A):
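    # A symmetric real matrix is positive definite iff its Cholesky
    # factorization succeeds, so attempt it and catch the failure.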
try:
| np.linalg.cholesky(A) | numpy.linalg.cholesky |
import os
from contextlib import contextmanager
from functools import wraps
import six
import numpy as np
import torch
from torch import nn
from nics_fix_pt import quant
import pytest
def _add_call_counter(func):
@wraps(func)
def _func(*args, **kwargs):
_func.num_calls += 1
return func(*args, **kwargs)
_func.num_calls = 0
return _func
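# Wrap quant.quantitize with the call counter *before* importing the name
# below, so `quantitize` here is bound to the instrumented version.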
quant.quantitize = _add_call_counter(quant.quantitize)
from nics_fix_pt.quant import quantitize
@contextmanager
def patch_variable(patches):
backup = []
for module, name, value in patches:
backup.append((module, name, getattr(module, name)))
setattr(module, name, value)
yield
for module, name, value in backup:
setattr(module, name, value)
def _cnn_data(device="cuda", batch_size=2):
return (torch.rand(batch_size, 3, 28, 28, dtype=torch.float, device=device),
torch.tensor(np.random.randint(0, high=10, size=batch_size)).long().to(device))
def _supernet_sample_cand(net):
ss = net.search_space
rollout = ss.random_sample()
# arch = [([0, 0, 2, 2, 0, 2, 4, 4], [0, 6, 7, 6, 1, 1, 5, 7]),
# ([1, 1, 0, 0, 1, 2, 2, 2], [7, 2, 2, 1, 7, 4, 3, 7])]
cand_net = net.assemble_candidate(rollout)
return cand_net
BITWIDTH = 8
FIX_METHOD = 1 # auto_fix
def _generate_default_fix_cfg(names, scale=0, bitwidth=8, method=0):
return {n: {
"method": torch.autograd.Variable(torch.IntTensor( | np.array([method]) | numpy.array |
# Develop new version
# Original from #/XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/chx_correlation.py
# ######################################################################
# Let's change from masks to indices
########################################################################
"""
This module is for functions specific to spatial correlation in order to tackle the motion of speckles
"""
from __future__ import absolute_import, division, print_function
# from __future__ import absolute_import, division, print_function
from skbeam.core.utils import multi_tau_lags
from skbeam.core.roi import extract_label_indices
from collections import namedtuple
import numpy as np
from scipy.signal import fftconvolve
# for a convenient status bar
try:
from tqdm import tqdm
except ImportError:
def tqdm(iterator):
return iterator
from scipy.fftpack.helper import next_fast_len
def get_cor_region(cor, cij, qid, fitw):
"""YG developed@CHX July/2019, Get a rectangle region of the cor class by giving center and width"""
ceni = cor.centers[qid]
x1, x2, y1, y2 = (
max(0, ceni[0] - fitw),
ceni[0] + fitw,
max(0, ceni[1] - fitw),
ceni[1] + fitw,
)
return cij[qid][x1:x2, y1:y2]
def direct_corss_cor(im1, im2):
"""YG developed@CHX July/2019, directly calculate the cross correlation of two images
Input:
im1: the first image
im2: the second image
Return:
The cross correlation
"""
sx, sy = im1.shape
Nx, Ny = sx // 2, sy // 2
C = np.zeros([2 * Nx, 2 * Ny])
for i in range(-Nx, Nx):
for j in range(-Ny, Ny):
if i == 0:
if j == 0:
d1 = im1[:, :]
d2 = im2[:, :]
elif j < 0:
d1 = im1[:j, :]
d2 = im2[-j:, :]
else: ##j>0
d1 = im1[j:, :]
d2 = im2[:-j, :]
elif i < 0:
if j == 0:
d1 = im1[:, :i]
d2 = im2[:, -i:]
elif j < 0:
d1 = im1[:j, :i]
d2 = im2[-j:, -i:]
else: ##j>0
d1 = im1[j:, :i]
d2 = im2[:-j, -i:]
else: # i>0:
if j == 0:
d1 = im1[:, i:]
d2 = im2[:, :-i]
elif j < 0:
d1 = im1[:j, i:]
d2 = im2[-j:, :-i]
else: ##j>0
d1 = im1[j:, i:]
d2 = im2[:-j, :-i]
# print(i,j)
C[i + Nx, j + Ny] = np.sum(d1 * d2) / (
np.average(d1) * np.average(d2) * d1.size
)
return C.T
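# Note: the explicit double loop above costs O(Nx * Ny * sx * sy); the
# FFT-based CrossCorrelator2 below computes an equivalent normalized cross
# correlation far more efficiently.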
class CrossCorrelator2:
"""
Compute a 1D or 2D cross-correlation on data.
This uses a mask, which may be binary (array of 0's and 1's),
or a list of non-negative integer id's to compute cross-correlations
separately on.
The symmetric averaging scheme introduced here is inspired by a paper
from Schatzel, although the implementation is novel in that it
allows for the usage of arbitrary masks. [1]_
Examples
--------
    >> cc = CrossCorrelator2(mask.shape, mask=mask)
    >> # correlated image
    >> cimg = cc(img1)
    or, mask may be ids
    >> cc = CrossCorrelator2(ids)
#(where ids is same shape as img1)
>> cc1 = cc(img1)
>> cc12 = cc(img1, img2)
# if img2 shifts right of img1, point of maximum correlation is shifted
# right from correlation center
References
----------
.. [1] Schatzel, Klaus, <NAME>, and <NAME>. "Photon
correlation measurements at large lag times: improving
statistical accuracy." Journal of Modern Optics 35.4 (1988):
711-718.
"""
# TODO : when mask is None, don't compute a mask, submasks
def __init__(self, shape, mask=None, normalization=None, progress_bar=True):
"""
Prepare the spatial correlator for various regions specified by the
id's in the image.
Parameters
----------
shape : 1 or 2-tuple
The shape of the incoming images or curves. May specify 1D or
2D shapes by inputting a 1 or 2-tuple
mask : 1D or 2D np.ndarray of int, optional
Each non-zero integer represents unique bin. Zero integers are
assumed to be ignored regions. If None, creates a mask with
all points set to 1
normalization: string or list of strings, optional
These specify the normalization and may be any of the
following:
'regular' : divide by pixel number
'symavg' : use symmetric averaging
Defaults to ['regular'] normalization
        The `wrap` argument was removed as unused; `fftconvolve` already
        expands arrays to compute the complete convolution, so there is no
        need to expand the subregion images.
"""
if normalization is None:
normalization = ["regular"]
elif not isinstance(normalization, list):
normalization = list([normalization])
self.normalization = normalization
self.progress_bar = progress_bar
if mask is None: # we can do this easily now.
mask = np.ones(shape)
# initialize subregion information for the correlations
# first find indices of subregions and sort them by subregion id
pii, pjj = np.where(mask)
bind = mask[pii, pjj]
        order = np.argsort(bind)
        bind = bind[order]
        pii = pii[order]
        pjj = pjj[order]  # sort them all
# make array of pointers into position arrays
pos = np.append(0, 1 + np.where(np.not_equal(bind[1:], bind[:-1]))[0])
pos = np.append(pos, len(bind))
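        # e.g. bind = [1, 1, 2, 2, 2, 3] yields pos = [0, 2, 5, 6]; the slice
        # pos[i]:pos[i+1] then indexes the pixels belonging to subregion i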
self.pos = pos
self.ids = bind[pos[:-1]]
self.nids = len(self.ids)
sizes = np.array(
[
[
pii[pos[i] : pos[i + 1]].min(),
pii[pos[i] : pos[i + 1]].max(),
pjj[pos[i] : pos[i + 1]].min(),
pjj[pos[i] : pos[i + 1]].max(),
]
for i in range(self.nids)
]
)
self.pii = pii
self.pjj = pjj
self.offsets = sizes[:, 0:3:2].copy()
# WE now have two sets of positions of the subregions
# (pii-offsets[0],pjj-offsets[1]) in subregion and (pii,pjj) in
# images. pos is a pointer such that (pos[i]:pos[i+1])
# are the indices in the position arrays of subregion i.
self.sizes = 1 + (np.diff(sizes)[:, [0, 2]]).copy() # make sizes be for regions
centers = np.array(self.sizes.copy()) // 2
self.centers = centers
if len(self.ids) == 1:
self.centers = self.centers[0, :]
def __call__(self, img1, img2=None, normalization=None, check_res=False):
"""Run the cross correlation on an image/curve or against two
images/curves
Parameters
----------
img1 : 1D or 2D np.ndarray
The image (or curve) to run the cross correlation on
img2 : 1D or 2D np.ndarray
If not set to None, run cross correlation of this image (or
curve) against img1. Default is None.
normalization : string or list of strings
normalization types. If not set, use internally saved
normalization parameters
Returns
-------
ccorrs : 1d or 2d np.ndarray
An image of the correlation. The zero correlation is
located at shape//2 where shape is the 1 or 2-tuple
shape of the array
"""
progress_bar = self.progress_bar
if normalization is None:
normalization = self.normalization
if img2 is None:
self_correlation = True
else:
self_correlation = False
ccorrs = list()
pos = self.pos
# loop over individual regions
if progress_bar:
R = tqdm(range(self.nids))
else:
R = range(self.nids)
for reg in R:
# for reg in tqdm(range(self.nids)): #for py3.5
ii = self.pii[pos[reg] : pos[reg + 1]]
jj = self.pjj[pos[reg] : pos[reg + 1]]
i = ii.copy() - self.offsets[reg, 0]
j = jj.copy() - self.offsets[reg, 1]
# set up size for fft with padding
shape = 2 * self.sizes[reg, :] - 1
fshape = [next_fast_len(int(d)) for d in shape]
# fslice = tuple([slice(0, int(sz)) for sz in shape])
submask = np.zeros(self.sizes[reg, :])
submask[i, j] = 1
mma1 = np.fft.rfftn(submask, fshape) # for mask
# do correlation by ffts
maskcor = np.fft.irfftn(mma1 * mma1.conj(), fshape) # [fslice])
# print(reg, maskcor)
# maskcor = _centered(np.fft.fftshift(maskcor), self.sizes[reg,:]) #make smaller??
maskcor = _centered(maskcor, self.sizes[reg, :]) # make smaller??
# choose some small value to threshold
maskcor *= maskcor > 0.5
tmpimg = np.zeros(self.sizes[reg, :])
tmpimg[i, j] = img1[ii, jj]
im1 = np.fft.rfftn(tmpimg, fshape) # image 1
if self_correlation:
# ccorr = np.real(np.fft.ifftn(im1 * im1.conj(), fshape)[fslice])
ccorr = np.fft.irfftn(im1 * im1.conj(), fshape) # [fslice])
# ccorr = np.fft.fftshift(ccorr)
ccorr = _centered(ccorr, self.sizes[reg, :])
else:
ndim = img1.ndim
tmpimg2 = np.zeros_like(tmpimg)
tmpimg2[i, j] = img2[ii, jj]
im2 = np.fft.rfftn(tmpimg2, fshape) # image 2
ccorr = np.fft.irfftn(im1 * im2.conj(), fshape) # [fslice])
# ccorr = _centered(np.fft.fftshift(ccorr), self.sizes[reg,:])
ccorr = _centered(ccorr, self.sizes[reg, :])
# print('here')
###check here
if check_res:
if reg == 0:
self.norm = maskcor
self.ck = ccorr.copy()
# print(ccorr.max())
self.tmp = tmpimg
self.fs = fshape
###end the check
# now handle the normalizations
if "symavg" in normalization:
mim1 = np.fft.rfftn(tmpimg * submask, fshape)
Icorr = np.fft.irfftn(mim1 * mma1.conj(), fshape) # [fslice])
# Icorr = _centered(np.fft.fftshift(Icorr), self.sizes[reg,:])
Icorr = _centered(Icorr, self.sizes[reg, :])
# do symmetric averaging
if self_correlation:
Icorr2 = np.fft.irfftn(mma1 * mim1.conj(), fshape) # [fslice])
# Icorr2 = _centered(np.fft.fftshift(Icorr2), self.sizes[reg,:])
Icorr2 = _centered(Icorr2, self.sizes[reg, :])
else:
mim2 = np.fft.rfftn(tmpimg2 * submask, fshape)
Icorr2 = np.fft.irfftn(mma1 * mim2.conj(), fshape)
# Icorr2 = _centered(np.fft.fftshift(Icorr2), self.sizes[reg,:])
Icorr2 = _centered(Icorr2, self.sizes[reg, :])
# there is an extra condition that Icorr*Icorr2 != 0
w = np.where(np.abs(Icorr * Icorr2) > 0) # DO WE NEED THIS (use i,j).
ccorr[w] *= maskcor[w] / Icorr[w] / Icorr2[w]
# print 'size:',tmpimg.shape,Icorr.shape
if check_res:
if reg == 0:
self.ckn = ccorr.copy()
if "regular" in normalization:
# only run on overlapping regions for correlation
w = np.where(maskcor > 0.5)
if self_correlation:
ccorr[w] /= maskcor[w] * np.average(tmpimg[w]) ** 2
else:
ccorr[w] /= (
maskcor[w] * np.average(tmpimg[w]) * np.average(tmpimg2[w])
)
if check_res:
if reg == 0:
self.ckn = ccorr.copy()
# print('here')
# print( np.average(tmpimg[w]) )
# print( maskcor[w] )
# print( ccorr.max(), maskcor[w], np.average(tmpimg[w]), np.average(tmpimg2[w]) )
ccorrs.append(ccorr)
if len(ccorrs) == 1:
ccorrs = ccorrs[0]
return ccorrs
def _centered(img, sz):
n = sz // 2
# ind=np.r_[-n[0]:0,0:sz[0]-n[0]]
img = np.take(img, np.arange(-n[0], sz[0] - n[0]), 0, mode="wrap")
# ind=np.r_[-n[1]:0,0:sz[1]-n[1]]
img = np.take(img, np.arange(-n[1], sz[1] - n[1]), 1, mode="wrap")
return img
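# Quick sanity check for _centered (a sketch, not part of the original
# module): it re-centers wrap-around FFT correlation output so that the
# zero-lag term ends up at shape // 2.
if __name__ == "__main__":
    import numpy as np
    _a = np.zeros((4, 4))
    _a[0, 0] = 1.0  # raw FFT output puts zero lag in the corner
    _c = _centered(_a, np.array([4, 4]))
    assert _c[2, 2] == 1.0  # zero lag moved to shape // 2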
## define a customized fftconvolve
########################################################################################
# modified version of signaltools.py from scipy (Mark, March 2017)
# Author: <NAME>
# 1999 -- 2002
import warnings
import threading
# from . import sigtools
import numpy as np
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, ifftn, fftfreq
from numpy.fft import rfftn, irfftn
from numpy import (
allclose,
angle,
arange,
argsort,
array,
asarray,
atleast_1d,
atleast_2d,
cast,
dot,
exp,
expand_dims,
iscomplexobj,
isscalar,
mean,
ndarray,
newaxis,
ones,
pi,
poly,
polyadd,
polyder,
polydiv,
polymul,
polysub,
polyval,
prod,
product,
r_,
ravel,
real_if_close,
reshape,
roots,
sort,
sum,
take,
transpose,
unique,
where,
zeros,
zeros_like,
)
# from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
_rfft_mt_safe = NumpyVersion(np.__version__) >= "1.9.0.dev-e24486e"
_rfft_lock = threading.Lock()
def fftconvolve_new(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> lena = misc.lena()
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(lena, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
    complex_result = np.issubdtype(in1.dtype, np.complexfloating) or np.issubdtype(
        in2.dtype, np.complexfloating
    )
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
# expand by at least twice+1
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = irfftn(rfftn(in1, fshape) * rfftn(in2, fshape), fshape)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.")
def _cross_corr1(img1, img2=None):
"""Compute the cross correlation of one (or two) images.
Parameters
----------
img1 : np.ndarray
the image or curve to cross correlate
img2 : 1d or 2d np.ndarray, optional
If set, cross correlate img1 against img2. A shift of img2
to the right of img1 will lead to a shift of the point of
highest correlation to the right.
Default is set to None
"""
ndim = img1.ndim
if img2 is None:
img2 = img1
if img1.shape != img2.shape:
        errorstr = "Image shapes don't match. "
        errorstr += "(img1 : {}; img2 : {})".format(img1.shape, img2.shape)
raise ValueError(errorstr)
# need to reverse indices for second image
# fftconvolve(A,B) = FFT^(-1)(FFT(A)*FFT(B))
# but need FFT^(-1)(FFT(A(x))*conj(FFT(B(x)))) = FFT^(-1)(A(x)*B(-x))
reverse_index = tuple([slice(None, None, -1) for i in range(ndim)])
imgc = fftconvolve(img1, img2[reverse_index], mode="same")
return imgc
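# Usage sketch (not from the original module; assumes scipy.signal's
# fftconvolve is imported, as the original file does): the autocorrelation
# of a single bright pixel peaks at the image center, since zero lag sits
# at shape // 2 with mode='same'.
if __name__ == "__main__":
    from scipy.signal import fftconvolve
    _im = np.zeros((8, 8))
    _im[3, 5] = 1.0
    _ac = _cross_corr1(_im)
    assert np.unravel_index(np.argmax(_ac), _ac.shape) == (4, 4)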
class CrossCorrelator1:
"""
Compute a 1D or 2D cross-correlation on data.
This uses a mask, which may be binary (array of 0's and 1's),
or a list of non-negative integer id's to compute cross-correlations
separately on.
The symmetric averaging scheme introduced here is inspired by a paper
from Schätzel, although the implementation is novel in that it
allows for the usage of arbitrary masks. [1]_
    Examples
    --------
    >> cc = CrossCorrelator1(mask.shape, mask=mask)
    >> # correlated image
    >> cimg = cc(img1)
    or, mask may be ids
    >> cc = CrossCorrelator1(ids)
    #(where ids is same shape as img1)
    >> cc1 = cc(img1)
    >> cc12 = cc(img1, img2)
    # if img2 shifts right of img1, point of maximum correlation is shifted
    # right from correlation center
References
----------
.. [1] Schätzel, Klaus, <NAME>, and <NAME>. “Photon
correlation measurements at large lag times: improving
statistical accuracy.” Journal of Modern Optics 35.4 (1988):
711-718.
"""
# TODO : when mask is None, don't compute a mask, submasks
def __init__(self, shape, mask=None, normalization=None):
"""
Prepare the spatial correlator for various regions specified by the
id's in the image.
Parameters
----------
shape : 1 or 2-tuple
The shape of the incoming images or curves. May specify 1D or
2D shapes by inputting a 1 or 2-tuple
mask : 1D or 2D np.ndarray of int, optional
Each non-zero integer represents unique bin. Zero integers are
assumed to be ignored regions. If None, creates a mask with
all points set to 1
normalization: string or list of strings, optional
These specify the normalization and may be any of the
following:
'regular' : divide by pixel number
'symavg' : use symmetric averaging
Defaults to ['regular'] normalization
        Note: the former `wrap` argument was removed as unused;
        fftconvolve already pads arrays to obtain the complete
        convolution, so images of subregions need not be expanded.
        """
if normalization is None:
normalization = ["regular"]
elif not isinstance(normalization, list):
normalization = list([normalization])
self.normalization = normalization
if mask is None: # we can do this easily now.
mask = np.ones(shape)
# initialize subregions information for the correlation
# first find indices of subregions and sort them by subregion id
pii, pjj = np.where(mask)
bind = mask[pii, pjj]
ord = np.argsort(bind)
bind = bind[ord]
pii = pii[ord]
pjj = pjj[ord] # sort them all
# make array of pointers into position arrays
pos = np.append(0, 1 + np.where(np.not_equal(bind[1:], bind[:-1]))[0])
pos = np.append(pos, len(bind))
self.pos = pos
self.ids = bind[pos[:-1]]
self.nids = len(self.ids)
sizes = np.array(
[
[
pii[pos[i] : pos[i + 1]].min(),
pii[pos[i] : pos[i + 1]].max(),
pjj[pos[i] : pos[i + 1]].min(),
pjj[pos[i] : pos[i + 1]].max(),
]
for i in range(self.nids)
]
)
# make indices for subregions arrays and their sizes
pi = pii.copy()
pj = pjj.copy()
for i in range(self.nids):
pi[pos[i] : pos[i + 1]] -= sizes[i, 0]
pj[pos[i] : pos[i + 1]] -= sizes[i, 2]
self.pi = pi
self.pj = pj
self.pii = pii
self.pjj = pjj
sizes = 1 + (np.diff(sizes)[:, [0, 2]]) # make sizes be for regions
self.sizes = sizes.copy() # the shapes of each correlation
        # We now have two sets of positions of the subregions: (pi,pj) in the
        # subregion and (pii,pjj) in the images. pos is a pointer such that
        # (pos[i]:pos[i+1]) are the indices in the position arrays of subregion i.
# Making a list of arrays holding the masks for each id. Ideally, mask
# is binary so this is one element to quickly index original images
self.submasks = list()
self.centers = list()
# the positions of each axes of each correlation
self.positions = list()
self.maskcorrs = list()
# regions where the correlations are not zero
self.pxlst_maskcorrs = list()
# basically saving bunch of mask related stuff like indexing etc, just
# to save some time when actually computing the cross correlations
for id in range(self.nids):
submask = np.zeros(self.sizes[id, :])
submask[pi[pos[id] : pos[id + 1]], pj[pos[id] : pos[id + 1]]] = 1
self.submasks.append(submask)
maskcorr = _cross_corr1(submask)
            # The former wrap option is unneeded since fftconvolve pads
            # fully, so the submask need not be expanded. Overlap counts in
            # maskcorr are integers >= 1, so threshold at 0.5 to suppress
            # FFT floating-point noise
maskcorr *= maskcorr > 0.5
self.maskcorrs.append(maskcorr)
self.pxlst_maskcorrs.append(maskcorr > 0)
# centers are shape//2 as performed by fftshift
center = np.array(maskcorr.shape) // 2
self.centers.append(np.array(maskcorr.shape) // 2)
if mask.ndim == 1:
            self.positions.append(np.arange(maskcorr.shape[0]))
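# Usage sketch, following the class docstring (assumes the full
# __init__/__call__ from the original module; the class body is
# truncated above):
if __name__ == "__main__":
    _mask = np.ones((16, 16))
    _cc = CrossCorrelator1(_mask.shape, mask=_mask)
    _img = np.random.rand(16, 16)
    _cimg = _cc(_img)  # autocorrelation; zero lag at _cimg.shape // 2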
# -*- coding: utf-8 -*-
from numpy import (
    pi,
    real,
    min as np_min,
    max as np_max,
    abs as np_abs,
    linspace,
    exp,
)
from numpy.linalg import norm
from ....Classes.MeshMat import MeshMat
from ....definitions import config_dict
from ....Classes.MeshVTK import MeshVTK
from pyleecan.Functions.Plot.Pyvista.configure_plot import configure_plot
from pyleecan.Functions.Plot.Pyvista.plot_surf_deflection import plot_surf_deflection
from pyleecan.Functions.Plot.Pyvista.plot_mesh_field import plot_mesh_field
COLOR_MAP = config_dict["PLOT"]["COLOR_DICT"]["COLOR_MAP"]
FONT_FAMILY_PYVISTA = config_dict["PLOT"]["FONT_FAMILY_PYVISTA"]
def plot_contour(
self,
*args,
label=None,
index=None,
indices=None,
is_surf=False,
is_radial=False,
is_center=False,
clim=None,
field_name=None,
group_names=None,
save_path=None,
itimefreq=0,
is_show_fig=True,
win_title=None,
factor=None,
is_animated=False,
title="",
p=None,
):
"""Plot the contour of a field on a mesh using pyvista plotter.
Parameters
----------
self : MeshSolution
a MeshSolution object
*args: list of strings
List of axes requested by the user, their units and values (optional)
label : str
a label
index : int
an index
indices : list
list of the points to extract (optional)
is_surf : bool
field over outer surface
is_radial : bool
radial component only
is_center : bool
field at cell-centers
clim : list
a list of 2 elements for the limits of the colorbar
field_name : str
title of the field to display on plot
group_names : list
a list of str corresponding to group name(s)
save_path : str
path to save the figure
is_show_fig : bool
To call show at the end of the method
    is_animated : bool
        True to animate magnetic flux density
Returns
-------
"""
if group_names is not None:
meshsol_grp = self.get_group(group_names)
meshsol_grp.plot_contour(
*args,
label=label,
index=index,
indices=indices,
is_surf=is_surf,
is_radial=is_radial,
is_center=is_center,
clim=clim,
field_name=field_name,
group_names=None,
save_path=save_path,
itimefreq=itimefreq,
is_animated=is_animated,
title=title,
)
else:
# Init figure
if p is None:
            if title and not win_title:
                win_title = title
            elif win_title and not title:
                title = win_title
p, sargs = configure_plot(p=p, win_title=win_title, save_path=save_path)
p.add_text(
title,
position="upper_edge",
color="black",
font_size=10,
font=FONT_FAMILY_PYVISTA,
)
# Get the mesh_pv and field
mesh_pv, field, field_name = self.get_mesh_field_pv(
*args,
label=label,
index=index,
indices=indices,
is_surf=is_surf,
is_radial=is_radial,
is_center=is_center,
field_name=field_name,
)
# Add field to mesh
# if is_surf:
# surf = mesh_pv.get_surf(indices=indices)
# surf[field_name] = real(field)
# mesh_field = surf
# else:
# mesh_pv[field_name] = real(field)
# mesh_field = mesh_pv
if clim is None:
clim = [np_min(real(field)), np_max(real(field))]
            # guard against a zero or near-degenerate colorbar range
            if clim[1] != 0 and (clim[1] - clim[0]) / clim[1] < 0.01:
                clim[0] = -abs(clim[1])
                clim[1] = abs(clim[1])
plot_mesh_field(
p,
sargs,
field_name,
clim=clim,
mesh_pv=mesh_pv,
field=field,
)
###########
# Internal animation (cannot be combined with other plots)
if is_animated:
p.add_text(
'Adjust 3D view and press "Q"',
position="lower_edge",
color="gray",
font_size=10,
font="arial",
)
p.show(auto_close=False)
p.open_gif(save_path)
p.clear()
if len(args) == 0 or "time" in args:
mesh_pv_B, field_B, field_name_B = self.get_mesh_field_pv("time")
nframe = len(field_B)
is_time = True
else:
nframe = 25
                mesh_pv_B, field_B, field_name_B = self.get_mesh_field_pv(*args)
is_time = False
            t = linspace(0.0, 1.0, nframe + 1)
import random
import numpy as np
import pytest
from _pytest.python_api import approx
from flaky import flaky
from dowhy.gcm.independence_test import kernel_based, approx_kernel_based
from dowhy.gcm.independence_test.kernel import _fast_centering
@pytest.fixture
def preserve_random_generator_state():
numpy_state = np.random.get_state()
random_state = random.getstate()
yield
np.random.set_state(numpy_state)
random.setstate(random_state)
@flaky(max_runs=5)
def test_kernel_based_conditional_independence_test_independent():
z = np.random.randn(1000, 1)
x = np.exp(z + np.random.rand(1000, 1))
y = np.exp(z + np.random.rand(1000, 1))
assert kernel_based(x, y, z) > 0.05
@flaky(max_runs=5)
def test_kernel_based_conditional_independence_test_dependent():
z = np.random.randn(1000, 1)
w = np.random.randn(1000, 1)
x = np.exp(z + np.random.rand(1000, 1))
y = np.exp(z + np.random.rand(1000, 1))
assert 0.05 > kernel_based(x, y, w)
@flaky(max_runs=5)
def test_kernel_based_conditional_independence_test_categorical_independent():
x, y, z = generate_categorical_data()
assert kernel_based(x, y, z) > 0.05
@flaky(max_runs=5)
def test_kernel_based_conditional_independence_test_categorical_dependent():
x, y, z = generate_categorical_data()
assert kernel_based(x, z, y) < 0.05
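# The categorical tests above call generate_categorical_data(), which is
# defined elsewhere in the original test module. A minimal sketch that is
# consistent with its usage: x is continuous, z is a categorical function
# of x, and y is driven by z, so x is independent of y given z while x
# and z stay dependent.
def generate_categorical_data():
    x = np.random.normal(0, 1, 1000)
    z_num = (x > 0).astype(float)
    y = z_num + np.random.normal(0, 0.1, 1000)
    z = np.where(z_num == 1, 'Class A', 'Class B')
    return x, y, z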
@flaky(max_runs=2)
def test_kernel_based_conditional_independence_test_with_random_seed(preserve_random_generator_state):
z = np.random.randn(1000, 1)
x = np.exp(z + np.random.rand(1000, 1))
y = np.exp(z + np.random.rand(1000, 1))
assert kernel_based(x, z, y,
bootstrap_num_samples_per_run=5,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean) \
!= kernel_based(x, z, y,
bootstrap_num_samples_per_run=5,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean)
np.random.seed(0)
result_1 = kernel_based(x, z, y,
bootstrap_num_samples_per_run=5,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean)
np.random.seed(0)
result_2 = kernel_based(x, z, y,
bootstrap_num_samples_per_run=5,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean)
assert result_1 == result_2
def test_kernel_based_pairwise_independence_test_raises_error_when_too_few_samples():
with pytest.raises(RuntimeError):
kernel_based(np.array([1, 2, 3, 4]),
np.array([1, 3, 2, 4]))
@flaky(max_runs=5)
def test_kernel_based_pairwise_independence_test_independent():
z = np.random.randn(1000, 1)
w = np.random.randn(1000, 1)
x = np.exp(z + np.random.rand(1000, 1))
assert kernel_based(x, w) > 0.05
@flaky(max_runs=5)
def test_kernel_based_pairwise_independence_test_dependent():
z = np.random.randn(1000, 1)
x = np.exp(z + np.random.rand(1000, 1))
y = np.exp(z + np.random.rand(1000, 1))
assert kernel_based(x, y) < 0.05
@flaky(max_runs=5)
def test_kernel_based_pairwise_independence_test_categorical_independent():
x = np.random.normal(0, 1, 1000)
y = (np.random.choice(2, 1000) == 1).astype(str)
assert kernel_based(x, y) > 0.05
@flaky(max_runs=5)
def test_kernel_based_pairwise_independence_test_categorical_dependent():
x = np.random.normal(0, 1, 1000)
y = []
for v in x:
if v > 0:
y.append(0)
else:
y.append(1)
y = np.array(y).astype(str)
assert kernel_based(x, y) < 0.05
@flaky(max_runs=2)
def test_kernel_based_pairwise_independence_test_with_random_seed(preserve_random_generator_state):
x = np.random.randn(1000, 1)
y = x + np.random.randn(1000, 1)
assert kernel_based(x, y,
bootstrap_num_samples_per_run=10,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean) \
!= kernel_based(x, y,
bootstrap_num_samples_per_run=10,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean)
np.random.seed(0)
result_1 = kernel_based(x, y, bootstrap_num_samples_per_run=10,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean)
np.random.seed(0)
result_2 = kernel_based(x, y, bootstrap_num_samples_per_run=10,
bootstrap_num_runs=2,
p_value_adjust_func=np.mean)
assert result_1 == result_2
def test_kernel_based_pairwise_with_constant():
    assert not np.isnan(kernel_based(np.random.normal(0, 1, (1000, 2)), np.array([5] * 1000)))
@flaky(max_runs=5)
def test_approx_kernel_based_conditional_independence_test_independent():
z = np.random.randn(1000, 1)
x = np.exp(z + np.random.rand(1000, 1))
y = np.exp(z + np.random.rand(1000, 1))
assert approx_kernel_based(x, y, z) > 0.05
@flaky(max_runs=5)
def test_approx_kernel_based_conditional_independence_test_dependent():
    z = np.random.randn(1000, 1)
import numpy as np
from datetime import datetime as py_dtime
from datetime import timedelta
import pandas as pd
import requests
import re
from bs4 import BeautifulSoup as bs4
from bqplot import LinearScale, Axis, Lines, Figure, DateScale
from bqplot.interacts import HandDraw
from ipywidgets import widgets
from IPython.display import display
import locale
import warnings
warnings.filterwarnings('ignore')
locale.setlocale(locale.LC_ALL, '')
# --- MACHINE COSTS ---
resp = requests.get('https://cloud.google.com/compute/pricing')
html = bs4(resp.text, 'html.parser')
# Munge the cost data
def clean_promo(in_value, use_promo=False):
    # Cleans listings that include promotional pricing. By default the
    # regular (parenthesized) price is returned; pass use_promo=True for
    # the promotional price.
    if in_value.find("promo") > -1:
        if use_promo:
            return re.search(r"\d+\.\d+", in_value)[0]
        else:
            return re.search(r"\d+\.\d+", in_value[in_value.find("("):])[0]
    else:
        return in_value
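# Usage sketch with a hypothetical listing format "PROMO (REGULAR)":
# clean_promo("0.0275 promo (0.0475)")        -> "0.0475"  (regular price)
# clean_promo("0.0275 promo (0.0475)", True)  -> "0.0275"  (promo price)
# clean_promo("0.0475")                       -> "0.0475"  (no promo: unchanged)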
all_dfs = []
for table in html.find_all('table'):
header = table.find('thead').find_all('th')
header = [item.text for item in header]
data = table.find('tbody').find_all('tr')
rows = []
for ii in data:
thisrow = []
for jj in ii.find_all('td'):
if 'default' in jj.attrs.keys():
thisrow.append(jj.attrs['default'])
elif 'ore-hourly' in jj.attrs.keys():
thisrow.append(clean_promo(jj.attrs['ore-hourly'].strip('$')))
elif 'ore-monthly' in jj.attrs.keys():
thisrow.append(clean_promo(jj.attrs['ore-monthly'].strip('$')))
else:
thisrow.append(jj.text.strip())
rows.append(thisrow)
df = pd.DataFrame(rows[:-1], columns=header)
all_dfs.append(df)
# Pull out our reference dataframes
disk = [df for df in all_dfs if 'Price (per GB / month)' in df.columns][0]
machines_list = pd.concat([df for df in all_dfs if 'Machine type' in df.columns]).dropna()
machines_list = machines_list.drop('Preemptible price (USD)', axis=1)
machines_list = machines_list.rename(columns={'Price (USD)': 'Price (USD / hr)'})
active_machine = machines_list.iloc[0]
# Base costs, all per day
disk['Price (per GB / month)'] = disk['Price (per GB / month)'].astype(float)
cost_storage_hdd = disk[disk['Type'] == 'Standard provisioned space']['Price (per GB / month)'].values[0]
cost_storage_hdd /= 30. # To make it per day
cost_storage_ssd = disk[disk['Type'] == 'SSD provisioned space']['Price (per GB / month)'].values[0]
cost_storage_ssd /= 30. # To make it per day
storage_cost = {False: 0, 'ssd': cost_storage_ssd, 'hdd': cost_storage_hdd}
# --- WIDGET ---
date_start = py_dtime(2017, 1, 1, 0)
n_step_min = 2
def autoscale(y, window_minutes=30, user_buffer=10):
# Weights for the autoscaling
    weights = np.logspace(0, 2, window_minutes)
import numpy as np
from shapreg import utils, games, stochastic_games
from tqdm.auto import tqdm
def default_min_variance_samples(game):
'''Determine min_variance_samples.'''
return 5
def default_variance_batches(game, batch_size):
'''
Determine variance_batches.
This value tries to ensure that enough samples are included to make A
approximation non-singular.
'''
if isinstance(game, games.CooperativeGame):
        return int(np.ceil(10 * game.players / batch_size))
import math
import numpy as np
import scipy.sparse as sp
import torch
def calc_mag_gso(dir_adj, gso_type, q):
if sp.issparse(dir_adj):
id = sp.identity(dir_adj.shape[0], format='csc')
# Symmetrizing an adjacency matrix
adj = dir_adj + dir_adj.T.multiply(dir_adj.T > dir_adj) - dir_adj.multiply(dir_adj.T > dir_adj)
#adj = 0.5 * (dir_adj + dir_adj.transpose())
if q != 0:
dir = dir_adj.transpose() - dir_adj
trs = np.exp(1j * 2 * np.pi * q * dir.toarray())
trs = sp.csc_matrix(trs)
else:
trs = id # Fake
if gso_type == 'sym_renorm_mag_adj' or gso_type == 'rw_renorm_mag_adj' \
or gso_type == 'neg_sym_renorm_mag_adj' or gso_type == 'neg_rw_renorm_mag_adj' \
or gso_type == 'sym_renorm_mag_lap' or gso_type == 'rw_renorm_mag_lap':
adj = adj + id
if gso_type == 'sym_norm_mag_adj' or gso_type == 'sym_renorm_mag_adj' \
or gso_type == 'neg_sym_norm_mag_adj' or gso_type == 'neg_sym_renorm_mag_adj' \
or gso_type == 'sym_norm_mag_lap' or gso_type == 'sym_renorm_mag_lap':
row_sum = adj.sum(axis=1).A1
row_sum_inv_sqrt = np.power(row_sum, -0.5)
row_sum_inv_sqrt[np.isinf(row_sum_inv_sqrt)] = 0.
deg_inv_sqrt = sp.diags(row_sum_inv_sqrt, format='csc')
# A_{sym} = D^{-0.5} * A * D^{-0.5}
sym_norm_adj = deg_inv_sqrt.dot(adj).dot(deg_inv_sqrt)
if q == 0:
sym_norm_mag_adj = sym_norm_adj
elif q == 0.5:
sym_norm_mag_adj = sym_norm_adj.multiply(trs.real)
else:
sym_norm_mag_adj = sym_norm_adj.multiply(trs)
if gso_type == 'neg_sym_norm_mag_adj' or gso_type == 'neg_sym_renorm_mag_adj':
gso = -1 * sym_norm_mag_adj
elif gso_type == 'sym_norm_mag_lap' or gso_type == 'sym_renorm_mag_lap':
sym_norm_mag_lap = id - sym_norm_mag_adj
gso = sym_norm_mag_lap
else:
gso = sym_norm_mag_adj
elif gso_type == 'rw_norm_mag_adj' or gso_type == 'rw_renorm_mag_adj' \
or gso_type == 'neg_rw_norm_mag_adj' or gso_type == 'neg_rw_renorm_mag_adj' \
or gso_type == 'rw_norm_mag_lap' or gso_type == 'rw_renorm_mag_lap':
row_sum = adj.sum(axis=1).A1
row_sum_inv = np.power(row_sum, -1)
row_sum_inv[np.isinf(row_sum_inv)] = 0.
deg_inv = sp.diags(row_sum_inv, format='csc')
# A_{rw} = D^{-1} * A
rw_norm_adj = deg_inv.dot(adj)
if q == 0:
rw_norm_mag_adj = rw_norm_adj
elif q == 0.5:
rw_norm_mag_adj = rw_norm_adj.multiply(trs.real)
else:
rw_norm_mag_adj = rw_norm_adj.multiply(trs)
if gso_type == 'neg_rw_norm_mag_adj' or gso_type == 'neg_rw_renorm_mag_adj':
gso = -1 * rw_norm_mag_adj
elif gso_type == 'rw_norm_mag_lap' or gso_type == 'rw_renorm_mag_lap':
rw_norm_mag_lap = id - rw_norm_mag_adj
gso = rw_norm_mag_lap
else:
gso = rw_norm_mag_adj
else:
raise ValueError(f'{gso_type} is not defined.')
else:
id = np.identity(dir_adj.shape[0])
# Symmetrizing an adjacency matrix
adj = np.maximum(dir_adj, dir_adj.T)
#adj = 0.5 * (dir_adj + dir_adj.T)
if q != 0:
dir = dir_adj.T - dir_adj
            trs = np.exp(1j * 2 * np.pi * q * dir)
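# Usage sketch (assumes the full calc_mag_gso from the original module;
# the dense branch is truncated above): magnetic Laplacian of a 3-node
# directed cycle via the sparse path, with q=0.25 encoding edge direction
# as the complex phase exp(1j * 2 * pi * q * (A^T - A)).
if __name__ == "__main__":
    demo_adj = sp.csc_matrix(np.array([[0., 1., 0.],
                                       [0., 0., 1.],
                                       [1., 0., 0.]]))
    demo_gso = calc_mag_gso(demo_adj, 'sym_norm_mag_lap', q=0.25)
    print(np.round(demo_gso.toarray(), 3))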
from hierarc.Likelihood.LensLikelihood.ddt_hist_likelihood import DdtHistLikelihood, DdtHistKDELikelihood
import numpy as np
import numpy.testing as npt
import unittest
def log_likelihood(hist_obj, mean, sig_factor):
logl_max = hist_obj.log_likelihood(ddt=mean, dd=None)
npt.assert_almost_equal(logl_max, 0, decimal=1)
logl_sigma = hist_obj.log_likelihood(ddt=mean*(1+sig_factor), dd=None)
    npt.assert_almost_equal(logl_sigma - logl_max, -1 / 2., decimal=1)
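# Why -1/2: for a Gaussian likelihood with width sigma,
# log L(mu + sigma) - log L(mu) = -(sigma**2) / (2 * sigma**2) = -1/2,
# which is what the helper above checks one sigma away from the mean.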
#!/usr/bin/python3
def inv_rank(m, tol=1E-8, method='auto', logger=None, mpc=0, qr=0, **ka):
"""Computes matrix (pseudo-)inverse and rank with SVD.
    Eigenvalues smaller than tol times the largest eigenvalue are treated as zero, and the rank of the inverted matrix is also returned. The number of eigenvalues considered can be capped (see mpc) to speed up computation. Broadcasts over the last 2 dimensions of the matrix.
Parameters
------------
m: numpy.ndarray(shape=(...,n,n),dtype=float)
2-D or higher matrix to be inverted
tol: float
Eigenvalues < tol*maximum eigenvalue are treated as zero.
method: str
Method to compute eigenvalues:
* auto: Uses scipy for n<mpc or mpc==0 and sklearn otherwise
* scipy: Uses scipy.linalg.svd
* scipys: NOT IMPLEMENTED. Uses scipy.sparse.linalg.svds
* sklearn: Uses sklearn.decomposition.TruncatedSVD
logger: object
Logger to output warning. Defaults (None) to logging module
mpc: int
Maximum rank or number of eigenvalues/eigenvectors to consider.
Defaults to 0 to disable limit.
For very large input matrix, use a small value (e.g. 500) to save time at the cost of accuracy.
qr: int
Whether to use QR decomposition for matrix inverse.
Only effective when method=sklearn, or =auto that defaults to sklearn.
* 0: No
* 1: Yes with default settings
* 2+: Yes with n_iter=qr for sklearn.utils.extmath.randomized_svd
ka: Keyword args passed to method
Returns
-------
mi: numpy.ndarray(shape=(...,n,n),dtype=float)
Pseudo-inverse matrices
r: numpy.ndarray(shape=(...),dtype=int) or int
Matrix ranks
"""
import numpy as np
from numpy.linalg import LinAlgError
if logger is None:
import logging as logger
if m.ndim <= 1 or m.shape[-1] != m.shape[-2]:
raise ValueError('Wrong shape for m.')
if tol <= 0:
raise ValueError('tol must be positive.')
if qr < 0 or int(qr) != qr:
raise ValueError('qr must be non-negative integer.')
if method == 'auto':
if m.ndim > 2 and mpc > 0:
raise NotImplementedError(
'No current method supports >2 dimensions with mpc>0.')
elif m.shape[-1] <= mpc or mpc == 0:
# logger.debug('Automatically selected scipy method for matrix inverse.')
method = 'scipy'
else:
# logger.debug('Automatically selected sklearn method for matrix inverse.')
method = 'sklearn'
n = m.shape[-1]
if m.ndim == 2:
if method == 'scipy':
from scipy.linalg import svd
try:
s = svd(m, **ka)
except LinAlgError:
logger.warning(
"Default scipy.linalg.svd failed. Falling back to option lapack_driver='gesvd'. Expecting much slower computation."
)
s = svd(m, lapack_driver='gesvd', **ka)
n2 = n - np.searchsorted(s[1][::-1], tol * s[1][0])
if mpc > 0:
n2 = min(n2, mpc)
ans = np.matmul(s[2][:n2].T / s[1][:n2], s[2][:n2]).T
elif method == 'sklearn':
from sklearn.utils.extmath import randomized_svd as svd
n2 = min(mpc, n) if mpc > 0 else n
# Find enough n_components by increasing in steps
while True:
if qr == 1:
s = svd(m, n2, power_iteration_normalizer='QR', random_state=0, **ka)
elif qr > 1:
s = svd(m, n2, power_iteration_normalizer='QR', n_iter=qr, random_state=0, **ka)
else:
s = svd(m, n2, random_state=0, **ka)
if n2 == n or s[1][-1] <= tol * s[1][0] or mpc > 0:
break
n2 += np.min([n2, n - n2])
n2 = n2 - np.searchsorted(s[1][::-1], tol * s[1][0])
if mpc > 0:
n2 = min(n2, mpc)
ans = np.matmul(s[2][:n2].T / s[1][:n2], s[2][:n2]).T
else:
raise ValueError('Unknown method {}'.format(method))
else:
if method == 'scipy':
from scipy.linalg import svd
if mpc > 0:
raise NotImplementedError('Not supporting >2 dimensions for mpc>0.')
warned = False
m2 = m.reshape(np.prod(m.shape[:-2]), *m.shape[-2:])
s = []
for xi in m2:
try:
s.append(svd(xi, **ka))
except LinAlgError:
if not warned:
warned = True
logger.warning(
"Default scipy.linalg.svd failed. Falling back to option lapack_driver='gesvd'. Expecting much slower computation."
)
s.append(svd(xi, lapack_driver='gesvd', **ka))
n2 = n - np.array([np.searchsorted(x[1][::-1], tol * x[1][0]) for x in s])
ans = [np.matmul(x[2][:y].T / x[1][:y], x[2][:y]).T for x, y in zip(s, n2)]
ans = np.array(ans).reshape(*m.shape)
n2 = n2.reshape(*m.shape[:-2])
elif method == 'sklearn':
raise NotImplementedError(
'Not supporting >2 dimensions for method=sklearn.')
else:
raise ValueError('Unknown method {}'.format(method))
try:
if n2.ndim == 0:
n2 = n2.item()
except Exception:
pass
return ans, n2
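# Usage sketch (not part of the original module): pseudo-inverting a
# rank-1 symmetric matrix reports rank 1 and acts as the Moore-Penrose
# inverse (m @ mi @ m recovers m).
if __name__ == "__main__":
    import numpy as np
    _m = np.array([[1.0, 2.0], [2.0, 4.0]])
    _mi, _r = inv_rank(_m)
    assert _r == 1
    assert np.allclose(_m @ _mi @ _m, _m)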
def association_test_1(vx,
vy,
dx,
dy,
dc,
dci,
dcr,
dimreduce=0,
lowmem=False):
"""Fast linear association testing in single-cell non-cohort settings with covariates.
Single threaded version to allow for parallel computing wrapper. Mainly used for naive differential expression and co-expression. Computes exact P-value and effect size (gamma) with the model for linear association testing between each vector x and vector y:
y=gamma*x+alpha*C+epsilon,
epsilon~i.i.d. N(0,sigma**2).
Test statistic: conditional R**2 (or proportion of variance explained) between x and y.
Null hypothesis: gamma=0.
Parameters
----------
vx: any
Starting indices of dx. Only used for information passing.
vy: any
Starting indices of dy. Only used for information passing.
dx: numpy.ndarray(shape=(n_x,n_cell)).
Predictor matrix for a list of vector x to be tested, e.g. gene expression or grouping.
dy: numpy.ndarray(shape=(n_y,n_cell)).
Target matrix for a list of vector y to be tested, e.g. gene expression.
dc: numpy.ndarray(shape=(n_cov,n_cell)).
Covariate matrix as C.
dci: numpy.ndarray(shape=(n_cov,n_cov)).
Low-rank inverse matrix of dc*dc.T.
dcr: int
Rank of dci.
dimreduce: numpy.ndarray(shape=(ny,),dtype='uint') or int.
If each vector y doesn't have full rank in the first place, this parameter is the loss of degree of freedom to allow for accurate P-value computation.
lowmem: bool
Whether to save memory by neither computing nor returning alpha.
Returns
----------
vx: any
vx from input for information passing.
vy: any
vy from input for information passing.
pv: numpy.ndarray(shape=(n_x,n_y))
P-values of association testing (gamma==0).
gamma: numpy.ndarray(shape=(n_x,n_y))
Maximum likelihood estimator of gamma in model.
alpha: numpy.ndarray(shape=(n_x,n_y,n_cov)) or None
Maximum likelihood estimator of alpha in model if not lowmem else None.
var_x: numpy.ndarray(shape=(n_x,))
Variance of dx unexplained by covariates C.
var_y: numpy.ndarray(shape=(n_y,))
Variance of dy unexplained by covariates C.
"""
import numpy as np
from scipy.stats import beta
import logging
if len(dx.shape) != 2 or len(dy.shape) != 2 or len(dc.shape) != 2:
raise ValueError('Incorrect dx/dy/dc size.')
n = dx.shape[1]
if dy.shape[1] != n or dc.shape[1] != n:
raise ValueError('Unmatching dx/dy/dc dimensions.')
nc = dc.shape[0]
if nc == 0:
logging.warning('No covariate dc input.')
elif dci.shape != (nc, nc):
raise ValueError('Unmatching dci dimensions.')
if dcr < 0:
raise ValueError('Negative dcr detected.')
elif dcr > nc:
raise ValueError('dcr higher than covariate dimension.')
if n <= dcr + dimreduce + 1:
raise ValueError(
'Insufficient number of cells: must be greater than degrees of freedom removed + covariate + 1.'
)
nx = dx.shape[0]
ny = dy.shape[0]
dx1 = dx
dy1 = dy
if dcr > 0:
# Remove covariates
ccx = np.matmul(dci, np.matmul(dc, dx1.T)).T
ccy = np.matmul(dci, np.matmul(dc, dy1.T)).T
dx1 = dx1 - np.matmul(ccx, dc)
dy1 = dy1 - np.matmul(ccy, dc)
ansvx = (dx1**2).mean(axis=1)
ansvx[ansvx == 0] = 1
ansvy = (dy1**2).mean(axis=1)
ansvy[ansvy == 0] = 1
ansc = (np.matmul(dy1, dx1.T) / (n * ansvx)).T
ansp = ((ansc**2).T * ansvx).T / ansvy
if lowmem:
ansa = None
elif dcr > 0:
ansa = np.repeat(
ccy.reshape(1, ccy.shape[0], ccy.shape[1]), ccx.shape[0],
axis=0) - (ansc * np.repeat(ccx.T.reshape(ccx.shape[1], ccx.shape[0], 1),
ccy.shape[0],
axis=2)).transpose(1, 2, 0)
else:
ansa = np.zeros((nx, ny, nc), dtype=dx.dtype)
# Compute p-values
assert (ansp >= 0).all() and (ansp <= 1 + 1E-8).all()
ansp = beta.cdf(1 - ansp, (n - 1 - dcr - dimreduce) / 2, 0.5)
assert ansp.shape == (nx, ny) and ansc.shape == (nx, ny) and ansvx.shape == (
nx, ) and ansvy.shape == (ny, )
assert np.isfinite(ansp).all() and np.isfinite(ansc).all() and np.isfinite(
ansvx).all() and np.isfinite(ansvy).all()
assert (ansp >= 0).all() and (ansp <= 1).all() and (ansvx >= 0).all() and (
ansvy >= 0).all()
if lowmem:
assert ansa is None
else:
assert ansa.shape == (nx, ny, nc) and np.isfinite(ansa).all()
return [vx, vy, ansp, ansc, ansa, ansvx, ansvy]
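# Usage sketch (not from the original module): 2 x's against 3 y's over
# 100 cells with an intercept as the only covariate.
if __name__ == "__main__":
    import numpy as np
    _rng = np.random.RandomState(0)
    _dx = _rng.randn(2, 100)
    _dy = _rng.randn(3, 100)
    _dc = np.ones((1, 100))             # intercept covariate
    _dci, _dcr = inv_rank(_dc @ _dc.T)  # low-rank inverse of dc @ dc.T
    _, _, _pv, _gamma, _alpha, _vx, _vy = association_test_1(
        0, 0, _dx, _dy, _dc, _dci, _dcr)
    assert _pv.shape == (2, 3) and (_pv >= 0).all() and (_pv <= 1).all()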
def association_test_2(vx,
vy,
dx,
dy,
dc,
sselectx,
dimreduce=0,
lowmem=False):
"""Like association_test_1, but takes a different subset of samples for each x.
See association_test_1 for additional details.
Parameters
----------
vx: any
Starting indices of dx. Only used for information passing.
vy: any
Starting indices of dy. Only used for information passing.
dx: numpy.ndarray(shape=(n_x,n_cell)).
Predictor matrix for a list of vector x to be tested, e.g. gene expression or grouping.
dy: numpy.ndarray(shape=(n_y,n_cell)).
Target matrix for a list of vector y to be tested, e.g. gene expression.
dc: numpy.ndarray(shape=(n_cov,n_cell)).
Covariate matrix as C.
sselectx: numpy.ndarray(shape=(n_x,n_cell),dtype=bool)
Subset of samples to use for each x.
dimreduce: numpy.ndarray(shape=(ny,),dtype='uint') or int.
If each vector y doesn't have full rank in the first place, this parameter is the loss of degree of freedom to allow for accurate P-value computation.
lowmem: bool
Whether to save memory by neither computing nor returning alpha.
Returns
--------
vx: any
vx from input for information passing.
vy: any
vy from input for information passing.
pv: numpy.ndarray(shape=(n_x,n_y))
P-values of association testing (gamma==0).
gamma: numpy.ndarray(shape=(n_x,n_y))
Maximum likelihood estimator of gamma in model.
alpha: numpy.ndarray(shape=(n_x,n_y,n_cov)) or None
Maximum likelihood estimator of alpha in model if not lowmem else None.
var_x: numpy.ndarray(shape=(n_x,))
Variance of dx unexplained by covariates C.
var_y: numpy.ndarray(shape=(n_x,n_y))
Variance of dy unexplained by covariates C.
"""
import numpy as np
import logging
from scipy.stats import beta
if len(dx.shape) != 2 or len(dy.shape) != 2 or len(dc.shape) != 2:
raise ValueError('Incorrect dx/dy/dc size.')
n = dx.shape[1]
if dy.shape[1] != n or dc.shape[1] != n:
raise ValueError('Unmatching dx/dy/dc dimensions.')
nc = dc.shape[0]
if nc == 0:
logging.warning('No covariate dc input.')
if sselectx.shape != dx.shape:
raise ValueError('Unmatching sselectx dimensions.')
nx = dx.shape[0]
ny = dy.shape[0]
ansp = np.zeros((nx, ny), dtype=float)
ansvx = np.zeros((nx, ), dtype=float)
ansvy = np.zeros((nx, ny), dtype=float)
ansc = np.zeros((nx, ny), dtype=float)
if lowmem:
ansa = None
else:
ansa = np.zeros((nx, ny, nc), dtype=float)
ansn = np.zeros((nx, ny), dtype=int)
for xi in range(nx):
# Select samples
t1 = np.nonzero(sselectx[xi])[0]
ns = len(t1)
if len(np.unique(dx[xi, t1])) < 2:
continue
dx1 = dx[xi, t1]
dy1 = dy[:, t1]
if nc > 0:
# Remove covariates
dc1 = dc[:, t1]
t1 = np.matmul(dc1, dc1.T)
t1i, r = inv_rank(t1)
else:
r = 0
ansn[xi] = r
if r > 0:
# Remove covariates
ccx = np.matmul(t1i, np.matmul(dc1, dx1.T)).T
ccy = np.matmul(t1i, np.matmul(dc1, dy1.T)).T
dx1 = dx1 - np.matmul(ccx, dc1)
dy1 = dy1 - np.matmul(ccy, dc1)
t1 = (dx1**2).mean()
if t1 == 0:
# X completely explained by covariate. Should never happen in theory.
t1 = 1
ansvx[xi] = t1
ansvy[xi] = (dy1**2).mean(axis=1)
ansc[xi] = np.matmul(dx1, dy1.T).ravel() / (ns * t1)
if (not lowmem) and r > 0:
ansa[xi] = ccy - np.repeat(ansc[xi].reshape(ny, 1), nc,
axis=1) * ccx.ravel()
ansp[xi] = (ansc[xi]**2) * t1 / ansvy[xi]
# Compute p-values
assert (ansp >= 0).all() and (ansp <= 1 + 1E-8).all()
t1 = (sselectx.sum(axis=1) - 1 - ansn.T - dimreduce).T
if (t1 <= 0).any():
raise RuntimeError(
'Insufficient number of cells: must be greater than degrees of freedom removed + covariate + 1.'
)
ansp = beta.cdf(1 - ansp, t1 / 2, 0.5)
assert ansp.shape == (nx, ny) and ansc.shape == (nx, ny) and ansvx.shape == (
nx, ) and ansvy.shape == (nx, ny)
assert np.isfinite(ansp).all() and np.isfinite(ansc).all() and np.isfinite(
ansvx).all() and np.isfinite(ansvy).all()
assert (ansp >= 0).all() and (ansp <= 1).all() and (ansvx >= 0).all() and (
ansvy >= 0).all()
if lowmem:
assert ansa is None
else:
assert ansa.shape == (nx, ny, nc) and np.isfinite(ansa).all()
return [vx, vy, ansp, ansc, ansa, ansvx, ansvy]
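# Usage sketch (not from the original module): binary groupings where
# each x may use its own subset of cells; here every cell is kept.
if __name__ == "__main__":
    import numpy as np
    _rng = np.random.RandomState(1)
    _dx = (_rng.rand(2, 100) > 0.5).astype(float)
    _dy = _rng.randn(3, 100)
    _dc = np.ones((1, 100))
    _sselectx = np.ones(_dx.shape, dtype=bool)
    _, _, _pv, _gamma, _alpha, _vx, _vy = association_test_2(
        0, 0, _dx, _dy, _dc, _sselectx)
    assert _pv.shape == (2, 3)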
def prod1(vx, vy, dx, dy):
"""Pickleable function for matrix product that keeps information
Parameters
-------------
vx: any
Information passed
vy: any
Information passed
dx: numpy.ndarray(shape=(...,n))
Matrix for multiplication
dy: numpy.ndarray(shape=(...,n))
Matrix for multiplication
Returns
---------
vx: any
vx
vy: any
vy
product: numpy.ndarray(shape=(...))
dx\ @\ dy.T
"""
import numpy as np
return (vx, vy, np.matmul(dx, dy.T))
def association_test_4(vx,
vy,
prod,
prody,
prodyy,
na,
dimreduce=0,
lowmem=False,
**ka):
"""Like association_test_1, but regards all other (untested) x's as covariates when testing each x.
    Also allows for the dx==dy setting, where neither the tested x nor the tested y is regarded as a covariate.
See association_test_1 for additional details. Other x's are treated as covariates but their coefficients (alpha) would not be returned to reduce memory footprint.
Parameters
----------
vx: any
Starting indices of dx.
vy: any
Starting indices of dy. Only used for information passing.
prod: numpy.ndarray(shape=(n_x+n_cov,n_x+n_cov))
A\ @\ A.T, where A=numpy.block([dx,dc]).
prody: numpy.ndarray(shape=(n_x+n_cov,n_y)) or None
A\ @\ dy.T, where A=numpy.block([dx,dc]). If None, indicating dx==dy and skipping tested y as a covariate.
prodyy: numpy.ndarray(shape=(n_y,)) or None
(dy**2).sum(axis=1). If None, indicating dx==dy and skipping tested y as a covariate.
na: tuple
(n_x,n_y,n_cov,n_cell,lenx). Numbers of (x's, y's, covariates, cells, x's to compute association for)
dimreduce: numpy.ndarray(shape=(ny,),dtype='uint') or int.
If each vector y doesn't have full rank in the first place, this parameter is the loss of degree of freedom to allow for accurate P-value computation.
lowmem: bool
Whether to save memory by neither computing nor returning alpha.
ka: dict
Keyword arguments passed to inv_rank.
Returns
--------
vx: any
vx from input for information passing.
vy: any
vy from input for information passing.
pv: numpy.ndarray(shape=(n_x,n_y))
P-values of association testing (gamma==0).
gamma: numpy.ndarray(shape=(n_x,n_y))
Maximum likelihood estimator of gamma in model.
alpha: numpy.ndarray(shape=(n_x,n_y,n_cov)) or None
Maximum likelihood estimator of alpha in model if not lowmem else None.
var_x: numpy.ndarray(shape=(lenx,)) or None
Variance of dx unexplained by covariates C if prody is not None else None.
var_y: numpy.ndarray(shape=(lenx,n_y))
Variance of dy unexplained by covariates C or untested x.
"""
import numpy as np
from scipy.stats import beta
import logging
import itertools
if len(na) != 5:
raise ValueError('Wrong format for na')
nx, ny, nc, n, lenx = na
if nx == 0 or ny == 0 or n == 0:
raise ValueError('Dimensions in na==0 detected.')
if nc == 0:
logging.warning('No covariate dc input.')
if lenx <= 0:
raise ValueError('lenx must be positive.')
if vx < 0 or vx + lenx > nx:
raise ValueError('Wrong values of vx and/or lenx, negative or beyond nx.')
if prod.shape != (nx + nc, nx + nc):
raise ValueError('Unmatching shape for prod. Expected: {}. Got: {}.'.format(
(nx + nc, nx + nc), prod.shape))
if prody is None:
samexy = True
assert prodyy is None
prody = prod[:, vy:(vy + ny)]
prodyy = prod[np.arange(ny) + vy, np.arange(ny) + vy]
else:
samexy = False
if prody.shape != (nx + nc, ny):
raise ValueError('Unmatching shape for prody. Expected: {}. Got: {}.'.format(
(nx + nc, ny), prody.shape))
if prodyy.shape != (ny, ):
raise ValueError(
'Unmatching shape for prodyy. Expected: {}. Got: {}.'.format(
(ny, ), prodyy.shape))
ansp = np.zeros((lenx, ny), dtype=float)
ansvx = np.zeros((lenx, ), dtype=float)
ansvy = np.zeros((lenx, ny), dtype=float)
ansc = np.zeros((lenx, ny), dtype=float)
if lowmem:
ansa = None
else:
ansa = np.zeros((lenx, ny, nc), dtype=float)
ansn = np.zeros((lenx, ny), dtype=int)
if samexy:
it = itertools.product(range(lenx), range(ny))
it = [[x[0], [x[1]]] for x in it if x[0] + vx < x[1] + vy]
else:
it = [[x, np.arange(ny)] for x in range(lenx)]
for xi in it:
# Covariate IDs
t0 = list(filter(lambda x: x != vx + xi[0], range(nx + nc)))
if samexy:
t0 = list(filter(lambda x: x != vy + xi[1][0], t0))
if len(t0) > 0:
t1 = prod[np.ix_(t0, t0)]
t1i, r = inv_rank(t1, **ka)
else:
r = 0
ansn[xi[0], xi[1]] = r
if r == 0:
# No covariate
dxx = prod[vx + xi[0], vx + xi[0]] / n
dyy = prodyy[xi[1]] / n
dxy = prody[vx + xi[0], xi[1]] / n
else:
ccx = np.matmul(prod[[vx + xi[0]], t0], t1i)
dxx = (prod[vx + xi[0], vx + xi[0]]
- float(np.matmul(ccx, prod[t0, [vx + xi[0]]]))) / n
ccy = np.matmul(prody[t0][:, xi[1]].T, t1i)
dyy = (prodyy[xi[1]] - (ccy.T * prody[t0][:, xi[1]]).sum(axis=0)) / n
dxy = (prody[vx + xi[0], xi[1]]
- np.matmul(ccy, prod[t0, [vx + xi[0]]]).ravel()) / n
if dxx == 0:
# X completely explained by covariate. Should never happen in theory.
dxx = 1
ansvx[xi[0]] = dxx
ansvy[xi[0], xi[1]] = dyy
ansc[xi[0], xi[1]] = dxy / dxx
if (not lowmem) and r > 0:
ansa[xi[0], xi[1]] = ccy[:, -nc:] - np.repeat(
ansc[xi[0], xi[1]].reshape(len(xi[1]), 1), nc, axis=1) * ccx[-nc:]
ansp[xi[0], xi[1]] = (dxy**2) / (dxx * dyy)
# Compute p-values
assert (ansp >= 0).all() and (ansp <= 1 + 1E-8).all()
t1 = n - 1 - ansn - dimreduce
if (t1 <= 0).any():
raise RuntimeError(
'Insufficient number of cells: must be greater than degrees of freedom removed + covariate + 1.'
)
ansp = beta.cdf(1 - ansp, t1 / 2, 0.5)
assert ansp.shape == (lenx, ny) and ansc.shape == (
lenx, ny) and ansvx.shape == (lenx, ) and ansvy.shape == (lenx, ny)
assert np.isfinite(ansp).all() and np.isfinite(ansc).all() and np.isfinite(
ansvx).all() and np.isfinite(ansvy).all()
assert (ansp >= 0).all() and (ansp <= 1).all() and (ansvx >= 0).all() and (
ansvy >= 0).all()
if samexy:
ansvx = None
if lowmem:
assert ansa is None
else:
assert ansa.shape == (lenx, ny, nc) and np.isfinite(ansa).all()
return [vx, vy, ansp, ansc, ansa, ansvx, ansvy]
def association_test_5(vx,
vy,
prod,
prody,
prodyy,
na,
mask,
dimreduce=0,
lowmem=False,
**ka):
"""Like association_test_4, but uses mask to determine which X can affect which Y. Under development.
Parameters
----------
vx: any
Starting indices of dx.
vy: any
Starting indices of dy. Only used for information passing.
prod: numpy.ndarray(shape=(n_x+n_cov,n_x+n_cov))
A\ @\ A.T, where A=numpy.block([dx,dc]).
prody: numpy.ndarray(shape=(n_x+n_cov,n_y)) or None
A\ @\ dy.T, where A=numpy.block([dx,dc]). If None, indicating dx==dy and skipping tested y as a covariate.
prodyy: numpy.ndarray(shape=(n_y,)) or None
(dy**2).sum(axis=1). If None, indicating dx==dy and skipping tested y as a covariate.
na: tuple
(n_x,n_y,n_cov,n_cell,lenx). Numbers of (x's, y's, covariates, cells, x's to compute association for)
mask: numpy.ndarray(shape=(n_x,n_y),dtype=bool)
Prior constraint on whether each X can affect each Y.
dimreduce: numpy.ndarray(shape=(ny,),dtype='uint') or int.
If each vector y doesn't have full rank in the first place, this parameter is the loss of degree of freedom to allow for accurate P-value computation.
lowmem: bool
Whether to save memory by neither computing nor returning alpha.
ka: dict
Keyword arguments passed to inv_rank.
Returns
--------
vx: any
vx from input for information passing.
vy: any
vy from input for information passing.
pv: numpy.ndarray(shape=(n_x,n_y))
P-values of association testing (gamma==0).
gamma: numpy.ndarray(shape=(n_x,n_y))
Maximum likelihood estimator of gamma in model.
alpha: numpy.ndarray(shape=(n_x,n_y,n_cov)) or None
Maximum likelihood estimator of alpha in model if not lowmem else None.
var_x: numpy.ndarray(shape=(lenx,n_y))
Variance of dx unexplained by covariates C or untested x.
var_y: numpy.ndarray(shape=(lenx,n_y))
Variance of dy unexplained by covariates C or untested x.
"""
import numpy as np
from scipy.stats import beta
import logging
import itertools
if len(na) != 5:
raise ValueError('Wrong format for na')
nx, ny, nc, n, lenx = na
if nx == 0 or ny == 0 or n == 0:
raise ValueError('Dimensions in na==0 detected.')
if nc == 0:
logging.warning('No covariate dc input.')
if lenx <= 0:
raise ValueError('lenx must be positive.')
if vx < 0 or vx + lenx > nx:
raise ValueError('Wrong values of vx and/or lenx, negative or beyond nx.')
if prod.shape != (nx + nc, nx + nc):
raise ValueError('Unmatching shape for prod. Expected: {}. Got: {}.'.format(
(nx + nc, nx + nc), prod.shape))
if prody.shape != (nx + nc, ny):
raise ValueError('Unmatching shape for prody. Expected: {}. Got: {}.'.format(
(nx + nc, ny), prody.shape))
if prodyy.shape != (ny, ):
raise ValueError(
'Unmatching shape for prodyy. Expected: {}. Got: {}.'.format(
(ny, ), prodyy.shape))
if mask.shape != (nx, ny):
raise ValueError('Unmatching shape for mask. Expected: {}. Got: {}.'.format(
(nx, ny), mask.shape))
ansp = np.zeros((lenx, ny), dtype=float)
ansvx = np.zeros((lenx, ny), dtype=float)
ansvy = np.zeros((lenx, ny), dtype=float)
ansc = np.zeros((lenx, ny), dtype=float)
if lowmem:
ansa = None
else:
ansa = np.zeros((lenx, ny, nc), dtype=float)
ansn = np.zeros((lenx, ny), dtype=int)
it = np.array(np.nonzero(mask)).T
it = it[(it[:, 0] >= vx) & (it[:, 0] < vx + lenx)]
it[:, 0] -= vx
for xi in it:
# Covariate IDs
t0 = list(
filter(
lambda x: x != vx + xi[0],
itertools.chain(np.nonzero(mask[:, xi[1]])[0],
np.arange(nc) + nx)))
if len(t0) > 0:
t1 = prod[np.ix_(t0, t0)]
t1i, r = inv_rank(t1, **ka)
else:
r = 0
ansn[xi[0], xi[1]] = r
if r == 0:
# No covariate
dxx = prod[vx + xi[0], vx + xi[0]] / n
dyy = prodyy[xi[1]] / n
dxy = prody[vx + xi[0], xi[1]] / n
else:
ccx = prod[[vx + xi[0]], t0] @ t1i
dxx = (prod[vx + xi[0], vx + xi[0]]
- float(np.matmul(ccx, prod[t0, [vx + xi[0]]]))) / n
ccy = prody[t0, xi[1]] @ t1i
dyy = (prodyy[xi[1]] - ccy @ prody[t0, xi[1]]) / n
dxy = (prody[vx + xi[0], xi[1]] - ccy @ prod[t0, vx + xi[0]]) / n
if dxx == 0:
# X completely explained by covariate. Should never happen in theory.
dxx = 1
ansvx[xi[0]] = dxx
ansvy[xi[0], xi[1]] = dyy
ansc[xi[0], xi[1]] = dxy / dxx
if (not lowmem) and r > 0:
ansa[xi[0],
xi[1]] = ccy[-nc:] - np.repeat(ansc[xi[0], xi[1]], nc) * ccx[-nc:]
ansp[xi[0], xi[1]] = (dxy**2) / (dxx * dyy)
# Compute p-values
assert (ansp >= 0).all() and (ansp <= 1 + 1E-8).all()
t1 = n - 1 - ansn - dimreduce
if (t1 <= 0).any():
raise RuntimeError(
'Insufficient number of cells: must be greater than degrees of freedom removed + covariate + 1.'
)
ansp = beta.cdf(1 - ansp, t1 / 2, 0.5)
assert ansp.shape == (lenx, ny) and ansc.shape == (
lenx, ny) and ansvx.shape == (lenx, ny) and ansvy.shape == (lenx, ny)
assert np.isfinite(ansp).all() and np.isfinite(ansc).all() and np.isfinite(
ansvx).all() and np.isfinite(ansvy).all()
assert (ansp >= 0).all() and (ansp <= 1).all() and (ansvx >= 0).all() and (
ansvy >= 0).all()
if lowmem:
assert ansa is None
else:
assert ansa.shape == (lenx, ny, nc) and np.isfinite(ansa).all()
return [vx, vy, ansp, ansc, ansa, ansvx, ansvy]
def _auto_batchsize(bsx,
bsy,
itemsizex,
itemsizey,
itemsizec,
nc,
ns,
samexy,
maxx=500,
maxy=500,
sizemax=2**30):
import logging
if bsx == 0:
# Transfer 1GB data max
bsx = int((sizemax - itemsizec * nc * ns) // (2 * itemsizex * ns))
bsx = min(bsx, maxx)
if samexy:
logging.info('Using automatic batch size: {}'.format(bsx))
else:
logging.info('Using automatic batch size for X: {}'.format(bsx))
if bsy == 0 or samexy:
if samexy:
bsy = bsx
else:
bsy = int((sizemax - itemsizec * nc * ns) // (2 * itemsizey * ns))
bsy = min(bsy, maxy)
logging.info('Using automatic batch size for Y: {}'.format(bsy))
return bsx, bsy
def association_tests(dx,
dy,
dc,
bsx=0,
bsy=0,
nth=1,
lowmem=True,
return_dot=True,
single=0,
bs4=500,
**ka):
"""Performs association tests between all pairs of two (or one) variables. Performs parallel computation with multiple processes on the same machine.
Allows multiple options to treat other/untested dx when testing on one (see parameter *single*).
Performs parallel computation with multiple processes on the same machine.
Model for differential expression between X and Y:
Y=gamma*X+alpha*C+epsilon,
epsilon~i.i.d. N(0,sigma**2).
Test statistic: conditional R**2 (or proportion of variance explained) between Y and X.
Null hypothesis: gamma=0.
Parameters
-----------
dx: numpy.ndarray(shape=(n_x,n_cell)).
Normalized matrix X.
dy: numpy.ndarray(shape=(n_y,n_cell),dtype=float) or None
Normalized matrix Y. If None, indicates dy=dx, i.e. self-association between pairs within X.
dc: numpy.ndarray(shape=(n_cov,n_cell),dtype=float)
Normalized covariate matrix C.
bsx: int
Batch size, i.e. number of Xs in each computing batch. Defaults to 500.
bsy: int
Batch size, i.e. number of Xs in each computing batch. Defaults to 500. Ignored if dy is None.
nth: int
Number of parallel processes. Set to 0 for using automatically detected CPU counts.
lowmem: bool
Whether to replace alpha in return value with None to save memory
return_dot: bool
Whether to return dot product betwen dx and dy instead of coefficient gamma
single: int
Type of association test to perform that determines which cells and covariates are used for each association test between X and Y. Accepts the following values:
* 0: Simple pairwise association test between each X and Y across all cells.
* 1: Association test for each X uses only cells that have all zeros in dx for all other Xs. A typical application is low-MOI CRISPR screen.
* 4: Association test for each X uses all cells but regarding all other Xs as covariates that confound mean expression levels. This is suitable for high-MOI CRISPR screen.
* 5: Similar with 4 but uses mask to determine which X can affect which Y. Under development.
bs4: int
Batch size for matrix product when single=4. Defaults to 500.
ka: dict
Keyword arguments passed to normalisr.association.association_test_X. See below.
Returns
--------
P-values: numpy.ndarray(shape=(n_x,n_y))
Differential expression P-value matrix.
dot/gamma: numpy.ndarray(shape=(n_x,n_y))
If return_dot, inner product between X and Y pairs after removing covariates. Otherwise, matrix gamma.
alpha: numpy.ndarray(shape=(n_x,n_y,n_cov)) or None
Maximum likelihood estimators of alpha, separatly tested for each grouping if not lowmem else None.
varx: numpy.ndarray(shape=(n_x)) or numpy.ndarray(shape=(n_x,n_y)) or None
Variance of grouping after removing covariates if dy is not None and single!=5 else None
vary: numpy.ndarray(shape=(n_y)) if single==0 else numpy.ndarray(shape=(n_x,n_y))
Variance of gene expression after removing covariates.
Its shape depends on parameter single.
Keyword arguments
--------------------------------
dimreduce: numpy.ndarray(shape=(n_y,),dtype=int) or int
If dy doesn't have full rank, such as due to prior covariate removal (although the recommended method is to leave covariates in dc), this parameter allows to specify the loss of ranks/degrees of freedom to allow for accurate P-value computation. Default is 0, indicating no rank loss.
mask: numpy.ndarray(shape=(n_x,n_y),dtype=bool)
Whether each X can affect each Y. Only active for single==5.
"""
import numpy as np
import logging
import itertools
from .parallel import autopooler
ka0 = dict(ka)
ka0['lowmem'] = lowmem
if dy is None:
dy = dx
samexy = True
else:
samexy = False
nx, ns = dx.shape
ny = dy.shape[0]
nc = dc.shape[0]
if single in {0, 1}:
bsx, bsy = _auto_batchsize(bsx,
bsy,
dx.dtype.itemsize,
dy.dtype.itemsize,
dc.dtype.itemsize,
nc,
ns,
samexy,
maxx=500,
maxy=500)
elif single in {4}:
bsx, bsy = _auto_batchsize(bsx,
bsy,
dx.dtype.itemsize,
dy.dtype.itemsize,
dc.dtype.itemsize,
nc,
ns,
samexy,
maxx=10,
maxy=500000)
elif single in {5}:
bsx, bsy = _auto_batchsize(bsx,
bsy,
dx.dtype.itemsize,
dy.dtype.itemsize,
dc.dtype.itemsize,
nc,
ns,
samexy,
maxx=500000,
maxy=10)
else:
raise ValueError('Unknown value single={}'.format(single))
it0 = itertools.product(
map(lambda x: [x, min(x + bsx, nx)], range(0, nx, bsx)),
map(lambda x: [x, min(x + bsy, ny)], range(0, ny, bsy)))
if samexy and single not in {5}:
it0 = filter(lambda x: x[0][0] <= x[1][0], it0)
isdummy = True
if single in {0}:
# Pre-compute matrix inverse
if nc > 0 and (dc != 0).any():
dci, dcr = inv_rank(np.matmul(dc, dc.T))
else:
dci = None
dcr = 0
# Prepare parallel iterator
it0 = map(
lambda x: [
association_test_1,
(x[0][0], x[1][0], dx[x[0][0]:x[0][1]], dy[x[1][0]:x[1][1]], dc, dci,
dcr), ka0], it0)
elif single in {1}:
if samexy:
raise NotImplementedError('dy=None with single=1')
# Decide grouping
assert dx.max() == 1
t1 = dx.sum(axis=0)
t1 = dx == t1
for xi in range(nx):
assert len(np.unique(dx[xi, t1[xi]])) > 1
sselectx = t1
# Prepare parallel iterator
it0 = map(
lambda x: [
association_test_2,
(x[0][0], x[1][0], dx[x[0][0]:x[0][1]], dy[x[1][0]:x[1][1]], dc,
sselectx[x[0][0]:x[0][1]]), ka0], it0)
elif single in {4, 5}:
if bs4 == 0:
# Transfer 1GB data max
bs4 = int((2**30 - dc.dtype.itemsize * nc * ns) //
(2 * dx.dtype.itemsize * ns))
bs4 = min(bs4, 500)
logging.info(
'Using automatic batch size for matrix product: {}'.format(bs4))
# Compute matrix products
t1 = np.concatenate([dx, dc], axis=0)
it = itertools.product(
map(lambda x: [x, min(x + bs4, nx + nc)], range(0, nx + nc, bs4)),
map(lambda x: [x, min(x + bs4, nx + nc)], range(0, nx + nc, bs4)))
it = filter(lambda x: x[0][0] <= x[1][0], it)
it = map(
lambda x: [
prod1, (x[0][0], x[1][0], t1[x[0][0]:x[0][1]], t1[x[1][0]:x[1][1]]),
dict()], it)
ans0 = autopooler(nth, it, dummy=True)
tprod = np.zeros((nx + nc, nx + nc), dtype=dy.dtype)
for xi in ans0:
tprod[np.ix_(range(xi[0], xi[0] + xi[2].shape[0]),
range(xi[1], xi[1] + xi[2].shape[1]))] = xi[2]
del ans0
tprod = np.triu(tprod).T + np.triu(tprod, 1)
if single == 4:
if not samexy:
it = itertools.product(
map(lambda x: [x, min(x + bs4, nx + nc)], range(0, nx + nc, bs4)),
map(lambda x: [x, min(x + bs4, ny)], range(0, ny, bs4)))
it = map(
lambda x: [
prod1,
(x[0][0], x[1][0], t1[x[0][0]:x[0][1]], dy[x[1][0]:x[1][1]]),
dict()], it)
ans0 = autopooler(nth, it, dummy=True)
del t1
tprody = np.zeros((nx + nc, ny), dtype=dy.dtype)
for xi in ans0:
tprody[np.ix_(range(xi[0], xi[0] + xi[2].shape[0]),
range(xi[1], xi[1] + xi[2].shape[1]))] = xi[2]
del ans0
tprodyy = (dy**2).sum(axis=1)
it0 = map(
lambda x: [
association_test_4,
(x[0][0], x[1][0], tprod, tprody[:, x[1][0]:x[1][1]],
tprodyy[x[1][0]:x[1][1]], [
nx, x[1][1] - x[1][0], nc, ns, x[0][1] - x[0][0]]), ka0], it0)
else:
it0 = map(
lambda x: [
association_test_4,
(x[0][0], x[1][0], tprod, None, None, [
nx, x[1][1] - x[1][0], nc, ns, x[0][1] - x[0][0]]), ka0], it0)
elif single == 5:
mask = ka0.pop('mask')
assert mask.shape == (nx, ny)
it0 = map(
lambda x: [
association_test_5,
                (x[0][0], x[1][0], tprod, tprod[:, x[1][0]:x[1][1]],
                 # prodyy: diagonal of tprod, mirroring the samexy branch of
                 # single==4 (the source was truncated at this point)
                 tprod[np.arange(x[1][0], x[1][1]),
                       np.arange(x[1][0], x[1][1])],
                 [nx, x[1][1] - x[1][0], nc, ns, x[0][1] - x[0][0]],
                 mask[:, x[1][0]:x[1][1]]), ka0], it0)
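# Usage sketch (assumes the full normalisr.association module; the body
# of association_tests is truncated above): simple pairwise testing.
if __name__ == "__main__":
    import numpy as np
    _rng = np.random.RandomState(0)
    _pv, _dot, _alpha, _varx, _vary = association_tests(
        _rng.randn(5, 200), _rng.randn(8, 200), np.ones((1, 200)), nth=1)
    assert _pv.shape == (5, 8)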
__author__ = 'feurerm'
import copy
import unittest
import numpy as np
import sklearn.datasets
import sklearn.metrics
from autosklearn.pipeline.components.data_preprocessing.balancing.balancing \
import Balancing
from autosklearn.pipeline.classification import SimpleClassificationPipeline
from autosklearn.pipeline.components.classification.adaboost import AdaboostClassifier
from autosklearn.pipeline.components.classification.decision_tree import DecisionTree
from autosklearn.pipeline.components.classification.extra_trees import ExtraTreesClassifier
from autosklearn.pipeline.components.classification.gradient_boosting import GradientBoostingClassifier
from autosklearn.pipeline.components.classification.random_forest import RandomForest
from autosklearn.pipeline.components.classification.liblinear_svc import LibLinear_SVC
from autosklearn.pipeline.components.classification.libsvm_svc import LibSVM_SVC
from autosklearn.pipeline.components.classification.sgd import SGD
from autosklearn.pipeline.components.feature_preprocessing\
.extra_trees_preproc_for_classification import ExtraTreesPreprocessorClassification
from autosklearn.pipeline.components.feature_preprocessing.liblinear_svc_preprocessor import LibLinear_Preprocessor
class BalancingComponentTest(unittest.TestCase):
def test_balancing_get_weights_treed_single_label(self):
        Y = np.array([0] * 80 + [1] * 20)