""" Intra-class Correlation (3, 1) Notes ----- * Based on the code available at <https://github.com/ekmolloy/fmri_test-retest> | .. [McGraw1996] <NAME>., & <NAME>. (1996). Forming inferences about some intraclass correlation coefficients. Psychological methods, 1(1), 30. .. [Birn2013] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2013). The effect of scan length on the reliability of resting-state fMRI connectivity estimates. Neuroimage, 83, 550-558. """ # Author: <NAME> <<EMAIL>> import numpy as np import scipy def icc_31(X: "np.ndarray[np.float32]") -> float: """ ICC (3,1) Parameters ---------- X : Input data Returns ------- icc : float Intra-class correlation. """ _, k = np.shape(X) # type: ignore _, ms, _, _ = _anova(X) BMS = ms[2] EMS = ms[4] icc = (BMS - EMS) / (BMS + (k - 1) * EMS) return icc def _anova(X): """ """ m, n = np.shape(X) total = m * n A = np.sum(np.sum(np.power(X, 2.0))) Bc = np.sum(np.power(np.sum(X, 0), 2.0)) /
np.float32(m)
numpy.float32
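# Usage sketch (my addition): assumes the full `_anova` helper from the linked
# repository is available -- it is truncated above. Values are illustrative.
#
#     ratings = np.array([[9.0, 2.0, 5.0, 8.0],
#                         [6.0, 1.0, 3.0, 2.0],
#                         [8.0, 4.0, 6.0, 8.0]])  # rows: targets, columns: raters
#     print(icc_31(ratings))  # single-score consistency ICC, McGraw & Wong case (3,1)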
#!/usr/bin/env python
import keras
from keras.models import Sequential
from keras.models import Model
from keras.layers import Input, Dense, Activation, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
import scipy
import math
import cv2
import matplotlib
import pylab as plt
import numpy as np
from packages import freenect
from packages import frame_convert2
from packages import util
from config_reader import config_reader
import tensorflow as tf
import six.moves.urllib as urllib
import matplotlib.image as mpimg
from time import time
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

# Helper functions to create a model
def relu(x):
    return Activation('relu')(x)

def conv(x, nf, ks, name):
    x1 = Conv2D(nf, (ks, ks), padding='same', name=name)(x)
    return x1

def pooling(x, ks, st, name):
    x = MaxPooling2D((ks, ks), strides=(st, st), name=name)(x)
    return x

def vgg_block(x):
    # Block 1
    x = conv(x, 64, 3, "conv1_1")
    x = relu(x)
    x = conv(x, 64, 3, "conv1_2")
    x = relu(x)
    x = pooling(x, 2, 2, "pool1_1")

    # Block 2
    x = conv(x, 128, 3, "conv2_1")
    x = relu(x)
    x = conv(x, 128, 3, "conv2_2")
    x = relu(x)
    x = pooling(x, 2, 2, "pool2_1")

    # Block 3
    x = conv(x, 256, 3, "conv3_1")
    x = relu(x)
    x = conv(x, 256, 3, "conv3_2")
    x = relu(x)
    x = conv(x, 256, 3, "conv3_3")
    x = relu(x)
    x = conv(x, 256, 3, "conv3_4")
    x = relu(x)
    x = pooling(x, 2, 2, "pool3_1")

    # Block 4
    x = conv(x, 512, 3, "conv4_1")
    x = relu(x)
    x = conv(x, 512, 3, "conv4_2")
    x = relu(x)

    # Additional non-VGG layers
    x = conv(x, 256, 3, "conv4_3_CPM")
    x = relu(x)
    x = conv(x, 128, 3, "conv4_4_CPM")
    x = relu(x)
    return x

def stage1_block(x, num_p, branch):
    # Block 1
    x = conv(x, 128, 3, "conv5_1_CPM_L%d" % branch)
    x = relu(x)
    x = conv(x, 128, 3, "conv5_2_CPM_L%d" % branch)
    x = relu(x)
    x = conv(x, 128, 3, "conv5_3_CPM_L%d" % branch)
    x = relu(x)
    x = conv(x, 512, 1, "conv5_4_CPM_L%d" % branch)
    x = relu(x)
    x = conv(x, num_p, 1, "conv5_5_CPM_L%d" % branch)
    return x

def stageT_block(x, num_p, stage, branch):
    # Block 1
    x = conv(x, 128, 7, "Mconv1_stage%d_L%d" % (stage, branch))
    x = relu(x)
    x = conv(x, 128, 7, "Mconv2_stage%d_L%d" % (stage, branch))
    x = relu(x)
    x = conv(x, 128, 7, "Mconv3_stage%d_L%d" % (stage, branch))
    x = relu(x)
    x = conv(x, 128, 7, "Mconv4_stage%d_L%d" % (stage, branch))
    x = relu(x)
    x = conv(x, 128, 7, "Mconv5_stage%d_L%d" % (stage, branch))
    x = relu(x)
    x = conv(x, 128, 1, "Mconv6_stage%d_L%d" % (stage, branch))
    x = relu(x)
    x = conv(x, num_p, 1, "Mconv7_stage%d_L%d" % (stage, branch))
    return x

# CREATE KERAS MODEL AND LOAD WEIGHTS:
weights_path = "model/keras/model.h5"  # original weights converted from caffe
#weights_path = "training/weights.best.h5"  # weights trained from scratch

input_shape = (None, None, 3)
img_input = Input(shape=input_shape)

stages = 6
np_branch1 = 38
np_branch2 = 19

img_normalized = Lambda(lambda x: x / 256 - 0.5)(img_input)  # [-0.5, 0.5]

# VGG
stage0_out = vgg_block(img_normalized)

# stage 1
stage1_branch1_out = stage1_block(stage0_out, np_branch1, 1)
stage1_branch2_out = stage1_block(stage0_out, np_branch2, 2)
x = Concatenate()([stage1_branch1_out, stage1_branch2_out, stage0_out])

# stage t >= 2
for sn in range(2, stages + 1):
    stageT_branch1_out = stageT_block(x, np_branch1, sn, 1)
    stageT_branch2_out = stageT_block(x, np_branch2, sn, 2)
    if (sn < stages):
        x = Concatenate()([stageT_branch1_out, stageT_branch2_out, stage0_out])
model = Model(img_input, [stageT_branch1_out, stageT_branch2_out])
model.load_weights(weights_path)

slim = tf.contrib.slim

def get_video():
    ## getting video from the kinect
    # return frame_convert2.video_cv(freenect.sync_get_video()[0])
    ## getting video from the mac builtin camera
    return cv2.VideoCapture(0).read()[1]

def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 3D np array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a np 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()
    # Get the RGBA buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8).copy()  # np.fromstring is deprecated
    buf.shape = (h, w, 4)  # the buffer is laid out row-major: h rows of w pixels
    # canvas.tostring_argb gives pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll(buf, 3, axis=2)
    return buf

# USED FOR THE TESTS
test_image = 'data/setup_data/init_frame.jpg'
oriImg = cv2.imread(test_image)  # B,G,R order
# img = get_video()
# oriImg = img  # B,G,R order
plt.imshow(oriImg[:, :, [2, 1, 0]])  ## showing original image

# Load configuration
param, model_params = config_reader()
multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in param['scale_search']]

# Show sample heatmaps for right elbow and paf for right wrist and right elbow
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

## DISPLAYING GRAPHS AND PLOTS
# first figure shows padded images
# f, axarr = plt.subplots(1, len(multiplier))
# f.set_size_inches((20, 5))
# # second figure shows heatmaps
# f2, axarr2 = plt.subplots(1, len(multiplier))
# f2.set_size_inches((20, 5))
# # third figure shows PAFs
# f3, axarr3 = plt.subplots(2, len(multiplier))
# f3.set_size_inches((20, 10))

for m in range(len(multiplier)):
    scale = multiplier[m]
    imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'], model_params['padValue'])
    # axarr[m].imshow(imageToTest_padded[:, :, [2, 1, 0]])
    # axarr[m].set_title('Input image: scale %d' % m)

    input_img = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 0, 1, 2))  # required shape (1, width, height, channels)
    print("Input shape: " + str(input_img.shape))

    output_blobs = model.predict(input_img)
    print("Output shape (heatmap): " + str(output_blobs[1].shape))

    # extract outputs, resize, and remove padding
    heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
    heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'], interpolation=cv2.INTER_CUBIC)
    heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
    heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)

    paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
    paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'], interpolation=cv2.INTER_CUBIC)
    paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
    paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)

    # visualization
    # axarr2[m].imshow(oriImg[:, :, [2, 1, 0]])
    # ax2 = axarr2[m].imshow(heatmap[:, :, 3], alpha=.5)  # right elbow
    # axarr2[m].set_title('Heatmaps (Relb): scale %d' % m)
    # axarr3.flat[m].imshow(oriImg[:, :, [2, 1, 0]])
    # ax3x = axarr3.flat[m].imshow(paf[:, :, 16], alpha=.5)  # right elbow
    # axarr3.flat[m].set_title('PAFs (x comp. of Rwri to Relb): scale %d' % m)
    # axarr3.flat[len(multiplier) + m].imshow(oriImg[:, :, [2, 1, 0]])
    # ax3y = axarr3.flat[len(multiplier) + m].imshow(paf[:, :, 17], alpha=.5)  # right wrist
    # axarr3.flat[len(multiplier) + m].set_title('PAFs (y comp. of Relb to Rwri): scale %d' % m)

    heatmap_avg = heatmap_avg + heatmap / len(multiplier)
    paf_avg = paf_avg + paf / len(multiplier)

## MORE GRAPHS AND PLOTS:
# f2.subplots_adjust(right=0.93)
# cbar_ax = f2.add_axes([0.95, 0.15, 0.01, 0.7])
# _ = f2.colorbar(ax2, cax=cbar_ax)
#
# f3.subplots_adjust(right=0.93)
# cbar_axx = f3.add_axes([0.95, 0.57, 0.01, 0.3])
# _ = f3.colorbar(ax3x, cax=cbar_axx)
# cbar_axy = f3.add_axes([0.95, 0.15, 0.01, 0.3])
# _ = f3.colorbar(ax3y, cax=cbar_axy)

# Heatmap for right knee. Note that the body part is encoded in the 3rd (channel)
# dimension, so in this case right knee is at index 9. All body parts are defined in config:
# part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]
# plt.imshow(oriImg[:, :, [2, 1, 0]])
# plt.imshow(heatmap_avg[:, :, 9], alpha=.5)
# fig = matplotlib.pyplot.gcf()
# cax = matplotlib.pyplot.gca()
# fig.set_size_inches(20, 20)
# fig.subplots_adjust(right=0.93)
# cbar_ax = fig.add_axes([0.95, 0.15, 0.01, 0.7])
# _ = fig.colorbar(ax2, cax=cbar_ax)

# paf vectors for right elbow and right wrist
from numpy import ma
U = paf_avg[:, :, 16] * -1
V = paf_avg[:, :, 17]
X, Y = np.meshgrid(np.arange(U.shape[1]), np.arange(U.shape[0]))
M = np.zeros(U.shape, dtype='bool')
M[U**2 + V**2 < 0.5 * 0.5] = True
U = ma.masked_array(U, mask=M)
V = ma.masked_array(V, mask=M)

# 1
plt.figure()
# plt.imshow(oriImg[:, :, [2, 1, 0]], alpha=.5)
s = 5
Q = plt.quiver(X[::s, ::s], Y[::s, ::s], U[::s, ::s], V[::s, ::s],
               scale=50, headaxislength=4, alpha=.5, width=0.001, color='r')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(20, 20)

# Visualise all detected body parts. Note that we use peaks in heatmaps
from scipy.ndimage.filters import gaussian_filter

all_peaks = []
peak_counter = 0

for part in range(19 - 1):
    map_ori = heatmap_avg[:, :, part]
    map = gaussian_filter(map_ori, sigma=3)

    map_left = np.zeros(map.shape)
    map_left[1:, :] = map[:-1, :]
    map_right = np.zeros(map.shape)
    map_right[:-1, :] = map[1:, :]
    map_up = np.zeros(map.shape)
    map_up[:, 1:] = map[:, :-1]
    map_down = np.zeros(map.shape)
    map_down[:, :-1] = map[:, 1:]

    peaks_binary = np.logical_and.reduce(
        (map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > param['thre1']))
    peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
    peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
    id = range(peak_counter, peak_counter + len(peaks))
    peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

    all_peaks.append(peaks_with_score_and_id)
    peak_counter += len(peaks)

# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], [10,11], [2,12], [12,13],
           [13,14], [2,1], [1,15], [15,17], [1,16], [16,18], [3,17], [6,18]]
# the middle joints heatmap correspondence
mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], [23,24],
          [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], [55,56], [37,38], [45,46]]

connection_all = []
special_k = []
mid_num = 10

for k in range(len(mapIdx)):
    score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
    candA = all_peaks[limbSeq[k][0] - 1]
    candB = all_peaks[limbSeq[k][1] - 1]
    nA = len(candA)
    nB = len(candB)
    indexA, indexB = limbSeq[k]
    if (nA != 0 and nB != 0):
        connection_candidate = []
        for i in range(nA):
            for j in range(nB):
                vec = np.subtract(candB[j][:2], candA[i][:2])
                norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                vec = np.divide(vec, norm)
                startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num)  # api: numpy.linspace
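# Standalone sketch (my addition) of the shifted-copy peak-finding trick used
# above: a pixel counts as a peak if it is >= its four neighbours and above a
# threshold. Toy array and threshold are illustrative.
toy = np.zeros((7, 7))
toy[2, 3] = 1.0
toy = gaussian_filter(toy, sigma=1)
t_left = np.zeros(toy.shape);  t_left[1:, :] = toy[:-1, :]
t_right = np.zeros(toy.shape); t_right[:-1, :] = toy[1:, :]
t_up = np.zeros(toy.shape);    t_up[:, 1:] = toy[:, :-1]
t_down = np.zeros(toy.shape);  t_down[:, :-1] = toy[:, 1:]
toy_peaks = np.logical_and.reduce(
    (toy >= t_left, toy >= t_right, toy >= t_up, toy >= t_down, toy > 0.05))
print(list(zip(*np.nonzero(toy_peaks))))  # [(2, 3)] -- the single blurred maximum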
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for Transformer models."""

import functools

from absl.testing import absltest
from absl.testing import parameterized
import numpy as np

from trax import fastmath
from trax import shapes
from trax.models import transformer


class TransformerTest(parameterized.TestCase):

  def test_transformer_lm_forward_shape(self):
    vocab_size = 16
    model = transformer.TransformerLM(
        vocab_size, d_model=32, d_ff=64, n_layers=2, n_heads=2)
    x = np.ones((3, 5)).astype(np.int32)
    _, _ = model.init(shapes.signature(x))
    y = model(x)
    self.assertEqual(y.shape, (3, 5, vocab_size))

  def _test_transformer_forward_shape(self, input_vocab_size, output_vocab_size):
    model = transformer.Transformer(
        input_vocab_size, output_vocab_size, d_model=32, d_ff=64,
        n_encoder_layers=2, n_decoder_layers=2, n_heads=2)
    xs = [np.ones((3, 5)).astype(np.int32), np.ones((3, 5)).astype(np.int32)]
    _, _ = model.init(shapes.signature(xs))
    y, _ = model(xs)
    vocab_size = output_vocab_size or input_vocab_size
    self.assertEqual(y.shape, (3, 5, vocab_size))

  def test_transformer_noencdec_forward_shape(self):
    input_vocab_size = 16
    output_vocab_size = 16
    model = transformer.TransformerNoEncDecAttention(
        input_vocab_size, output_vocab_size, d_model=32, d_ff=64,
        n_encoder_layers=2, n_decoder_layers=2, n_heads=2)
    enc_toks = np.array([[6, 2, 0, 0, 0, 0], [6, 3, 7, 0, 0, 0]])
    dec_toks = np.array([[4, 2, 0, 0], [8, 5, 0, 0]])
    xs = [enc_toks, dec_toks]
    _, _ = model.init(shapes.signature(xs))
    # decoder output, decoder mask
    ys = model(xs)
    # (B, L2, H)
    self.assertEqual(ys[0].shape,
                     (dec_toks.shape[0], dec_toks.shape[1], output_vocab_size))
    self.assertEqual(ys[1].shape, dec_toks.shape)

  @parameterized.named_parameters(
      ('same_vocab', 16, None),
      ('same_size', 16, 16),
      ('different_size', 16, 50))
  def test_transformer_forward_shape(self, input_vocab_size, output_vocab_size):
    """Run the Transformer forward and check output shape."""
    self._test_transformer_forward_shape(input_vocab_size, output_vocab_size)

  def _test_fast_inference(self, length):
    with fastmath.use_backend('jax'):
      vocab_size = 16
      model_fn = functools.partial(
          transformer.TransformerLM,
          vocab_size=vocab_size, d_model=4, d_ff=8, n_layers=2, n_heads=2,
      )
      model_slow = model_fn(mode='eval')
      model_fast = model_fn(mode='predict')
      rng = fastmath.random.get_prng(0)
      batch_size = 2
      input_signature = shapes.ShapeDtype((batch_size, 1), np.int32)
      # Given the same rng, both models initialize with the same parameters.
      model_slow.init(input_signature, rng)
      model_fast.init(input_signature, rng)
      buf = np.zeros((batch_size, length), dtype=np.int32)
      next_sym = np.zeros((batch_size, 1), dtype=np.int32)
      for index in range(length):
        logits_slow = model_slow(buf, rng=rng)
        logits_fast = model_fast(next_sym, rng=rng)
        np.testing.assert_array_almost_equal(
            logits_slow[:, index, :], logits_fast[:, 0, :], decimal=5,
        )
        next_sym = np.random.randint(vocab_size, size=(batch_size, 1))
        buf[:, index] = next_sym[:, 0]

  def test_dot_product_causal_attention_fast_inference(self):
    self._test_fast_inference(length=5)

  def test_concat_with_padding(self):
    vec_e = np.array(
        [[[7, 5, 2, 8, 8, 8, 6, 7],
          [8, 2, 6, 2, 1, 1, 4, 2],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0]],
         [[4, 3, 1, 7, 5, 6, 2, 1],
          [6, 9, 9, 4, 1, 3, 2, 1],
          [3, 8, 2, 4, 7, 9, 4, 1],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0]]]
    )
    # vec_e[:,:,0] != 0
    mask_e = np.array([[True, True, False, False, False, False],
                       [True, True, True, False, False, False]])
    vec_d = np.array(
        [[[4, 7, 7, 4, 8, 9, 9, 9],
          [6, 8, 2, 9, 3, 6, 6, 8],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0]],
         [[3, 7, 5, 6, 2, 9, 3, 1],
          [4, 7, 3, 2, 1, 1, 1, 6],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0]]]
    )
    mask_d = np.array([[True, True, False, False],
                       [True, True, False, False]])
    layer = transformer._ConcatWithPadding()
    y = layer((vec_e, vec_d, mask_e, mask_d))
    np.testing.assert_equal(
        y,
        np.array(
            [[[7, 5, 2, 8, 8, 8, 6, 7],
              [8, 2, 6, 2, 1, 1, 4, 2],
              [4, 7, 7, 4, 8, 9, 9, 9],
              [6, 8, 2, 9, 3, 6, 6, 8],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0]],
             [[4, 3, 1, 7, 5, 6, 2, 1],
              [6, 9, 9, 4, 1, 3, 2, 1],
              [3, 8, 2, 4, 7, 9, 4, 1],
              [3, 7, 5, 6, 2, 9, 3, 1],
              [4, 7, 3, 2, 1, 1, 1, 6],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0]]]
        )
    )

  def test_strip_from_concatenate_with_padding(self):
    enc_dec = np.array(
        [[[7, 5, 2, 8, 8, 8, 6, 7],
          [8, 2, 6, 2, 1, 1, 4, 2],
          [4, 7, 7, 4, 8, 9, 9, 9],
          [6, 8, 2, 9, 3, 6, 6, 8],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0]],
         [[4, 3, 1, 7, 5, 6, 2, 1],
          [6, 9, 9, 4, 1, 3, 2, 1],
          [3, 8, 2, 4, 7, 9, 4, 1],
          [3, 7, 5, 6, 2, 9, 3, 1],
          [4, 7, 3, 2, 1, 1, 1, 6],
          [4, 7, 3, 2, 1, 1, 1, 6],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0]]]
    )
    tok_e = np.array([[7, 8, 0, 0, 0, 0], [4, 6, 3, 0, 0, 0]])
    tok_d = np.array([[4, 6, 0, 0], [3, 4, 1, 0]])  # api: numpy.array
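# My own plain-NumPy restatement (not the Trax layer) of the padding semantics
# that test_concat_with_padding checks: drop each example's padding rows,
# concatenate encoder rows then decoder rows, and re-pad to length L1 + L2.
def concat_with_padding_ref(vec_e, vec_d, mask_e, mask_d):
  batch, l1, depth = vec_e.shape
  l2 = vec_d.shape[1]
  out = np.zeros((batch, l1 + l2, depth), dtype=vec_e.dtype)
  for b in range(batch):
    rows = np.concatenate([vec_e[b][mask_e[b]], vec_d[b][mask_d[b]]], axis=0)
    out[b, :rows.shape[0]] = rows
  return out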
""" Dynamic endpoints on eICU """ import numpy as np import pandas as pd import functions.util_array as mlhc_array class DynamicEndpointExtractor(): def __init__(self): self.create_pid_col = True # The horizons at the end which are marked as patient severity self.back_horizons = [1, 6, 12, 24] self.unit_discharge_categories = {"home": ["Home"], "telemetry": ["Telemetry"], "floor": ["Floor"], "step_down_unit": ["Step-Down Unit (SDU)"], "acute_care_floor": ["Acute Care/Floor"], "other_icu": ["Other ICU", "ICU", "Other ICU (CABG)"], "expired": ["Death"], "skilled_nursing_facility": ["Skilled Nursing Facility"], "other_hospital": ["Other Hospital"]} self.hospital_discharge_categories = {"home": ["Home"], "skilled_nursing_facility": ["Skilled Nursing Facility"], "expired": ["Death"], "rehabilitation": ["Rehabilitation"], "other_hospital": ["Other Hospital"], "nursing_home": ["Nursing Home"]} # The variables that are to be used as critical thresholds self.relevant_variables_vitals = ["temperature", "systemicmean", "respiration"] self.relevant_variables_lab = ["HCO3", "sodium", "potassium", "creatinine"] def transform(self, df_imputed, df_pat, pid=None): df_out_dict = {} if self.create_pid_col: df_out_dict["patientunitstayid"] = mlhc_array.value_empty(df_imputed.shape[0], pid, dtype=np.int64) df_out_dict["ts"] = df_imputed["ts"] rel_row = df_pat.iloc[0] hospital_discharge_location = str(rel_row["hospitaldischargelocation"]).strip() unit_discharge_location = str(rel_row["unitdischargelocation"]).strip() for var, vnames in self.unit_discharge_categories.items(): if unit_discharge_location in vnames: for hor in self.back_horizons: arr = np.zeros(df_imputed.shape[0], dtype=np.float64) arr[-hor:] = 1.0 df_out_dict["unit_discharge_{}_{}".format(var, hor)] = arr else: for hor in self.back_horizons: arr = np.zeros(df_imputed.shape[0], dtype=np.float64) df_out_dict["unit_discharge_{}_{}".format(var, hor)] = arr for var, vnames in self.hospital_discharge_categories.items(): if hospital_discharge_location in vnames: for hor in self.back_horizons: arr = np.zeros(df_imputed.shape[0], dtype=np.float64) arr[-hor:] = 1.0 df_out_dict["hospital_discharge_{}_{}".format(var, hor)] = arr else: for hor in self.back_horizons: arr = np.zeros(df_imputed.shape[0], dtype=np.float64) df_out_dict["hospital_discharge_{}_{}".format(var, hor)] = arr # Process the vital sign variables of interest temperature = np.array(df_imputed["vs_temperature"]) abpm =
        abpm = np.array(df_imputed["vs_systemicmean"])  # api: numpy.array
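# Hypothetical usage sketch (my addition): `transform` is truncated above, so
# this only illustrates the intended call signature; the column names follow
# the code above and all values are made up.
#
#     df_imputed = pd.DataFrame({"ts": np.arange(48),
#                                "vs_temperature": np.full(48, 37.0),
#                                "vs_systemicmean": np.full(48, 80.0)})
#     df_pat = pd.DataFrame([{"hospitaldischargelocation": "Home",
#                             "unitdischargelocation": "Floor"}])
#     ext = DynamicEndpointExtractor()
#     out = ext.transform(df_imputed, df_pat, pid=1)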
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""RBDL model interface used in priority tasks.

This is based on the implementation in
`https://github.com/ADVRHumanoids/ModelInterfaceRBDL`, which is licensed under
the LGPLv3.

References:
    - [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
    - [2] "Robot Control for Dummies: Insights and Examples using OpenSoT", Hoffman et al., 2017
    - [3] "Rigid Body Dynamics Algorithms", Featherstone, 2008
"""

import numpy as np
import rbdl

from pyrobolearn.priorities.models import ModelInterface
from pyrobolearn.utils.transformation import get_quaternion_from_matrix

__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME> (C++)", "<NAME> (C++)", "<NAME> (Python + doc)"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"


class RBDLModelInterface(ModelInterface):
    r"""RBDL Model interface."""

    def __init__(self, urdf, floating_base=False, verbose=False):
        """
        Initialize the RBDL model interface.

        Args:
            urdf (str): path to the URDF file.
            floating_base (bool): set this variable to True, if we have a floating-based robot.
            verbose (bool): if True, it will print information when loading the URDF.
        """
        # load the RBDL model
        model = rbdl.loadModel(filename=urdf.encode(), floating_base=floating_base, verbose=verbose)

        # call parent constructor
        super(RBDLModelInterface, self).__init__(model)

        # define joint attributes
        self.zeros = np.zeros(self.model.q_size)
        self._q = np.zeros(self.model.q_size)
        self._dq = np.zeros(self.model.qdot_size)
        self._ddq = np.zeros(self.model.qdot_size)

        self.mass = 0
        for body_id in range(len(self.model.mBodies)):
            body = self.model.mBodies[body_id]
            self.mass += body.mMass

        self.com = np.zeros(3)
        self.com_vel = np.zeros(3)
        self.com_acc = np.zeros(3)  # api: numpy.zeros
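# Usage sketch (my addition): 'robot.urdf' is a placeholder path; the printed
# attributes are the ones initialized in the constructor above.
#
#     iface = RBDLModelInterface('robot.urdf', floating_base=True)
#     print(iface.mass)         # total mass summed over the model's bodies
#     print(iface.zeros.shape)  # (q_size,), the zero configuration vector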
import numpy as np
from numpy.testing import dec

import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot, qqline, ProbPlot
from scipy import stats

try:
    import matplotlib.pyplot as plt
    import matplotlib
    if matplotlib.__version__ < '1':
        raise
    have_matplotlib = True
except:
    have_matplotlib = False


@dec.skipif(not have_matplotlib)  # api: numpy.testing.dec.skipif
""" prepare.py: Functions for getting ready for geometry optimization Copyright 2016-2020 Regents of the University of California and the Authors Authors: <NAME>, <NAME> Contributors: <NAME>, <NAME>, <NAME> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from __future__ import division import os import itertools import numpy as np import shutil import os from .internal import ( Distance, Angle, Dihedral, CartesianX, CartesianY, CartesianZ, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC, ) from .engine import ( set_tcenv, load_tcin, TeraChem, ConicalIntersection, Psi4, QChem, Gromacs, Molpro, OpenMM, QCEngineAPI, Gaussian, ) from .rotate import calc_fac_dfac from .molecule import Molecule, Elements from .nifty import logger, isint, uncommadash, bohr2ang, ang2bohr from .rotate import calc_fac_dfac def get_molecule_engine(**kwargs): """ Parameters ---------- args : namespace Command line arguments from argparse Returns ------- Molecule Molecule object containing necessary optimization info Engine Engine object containing methods for calculating energy and gradient """ ### Set up based on which quantum chemistry code we're using (defaults to TeraChem). engine_str = kwargs.get("engine", None) customengine = kwargs.get("customengine", None) # Path to Molpro executable (used if molpro=True) molproexe = kwargs.get("molproexe", None) # PDB file will be read for residue IDs to make TRICs for fragments # and provide starting coordinates in the case of OpenMM pdb = kwargs.get("pdb", None) # if frag=True, do not add a bond between residues. frag = kwargs.get("frag", False) # Number of threads to use (engine-dependent) threads = kwargs.get("nt", None) # Name of the input file. inputf = kwargs.get("input") # Name of temporary directory for calculations, needed by some engines. 
    dirname = kwargs.get("dirname", None)
    # Temporary directory generated by a previous Q-Chem calculation, may be
    # used at the beginning of a geomeTRIC calculation
    qcdir = kwargs.get("qcdir", None)
    # Spins, added just for AAT's purposes; default espin=0
    spins = kwargs.get("espines", 0)
    temperature = kwargs.get("Temperature", 90.0)
    rcut = kwargs.get("rcutOff", 1.0)
    Oxygens = {
        "0": False,
        "1": False,
        "2": False,
        "3": False,
        "4": False,
        "5": False,
        "Temperatura": temperature,
        "rcut": rcut,
    }
    Oxygens[str(spins)] = True
    # End modified by AAT

    ## MECI calculations create a custom engine that contains two other engines.
    if kwargs.get("meci", None):
        if (
            engine_str.lower()
            in ["psi4", "gmx", "molpro", "qcengine", "openmm", "gaussian"]
            or customengine
        ):
            logger.warning(
                "MECI optimizations are not tested with engines: "
                "psi4, gmx, molpro, qcengine, openmm, gaussian, customengine. Be careful!"
            )
        ## If 'engine' is provided as the argument to 'meci', then we assume the engine is
        # directly returning the MECI objective function and gradient.
        if kwargs["meci"].lower() == "engine":
            sub_kwargs = kwargs.copy()
            sub_kwargs["meci"] = None
            M, engine = get_molecule_engine(**sub_kwargs)
        else:
            meci_sigma = kwargs.get("meci_sigma", 3.5)
            meci_alpha = kwargs.get("meci_alpha", 0.025)
            sub_engines = {}
            for state in [1, 2]:
                sub_kwargs = kwargs.copy()
                if state == 2:
                    sub_kwargs["input"] = kwargs["meci"]
                sub_kwargs["meci"] = None
                M, sub_engine = get_molecule_engine(**sub_kwargs)
                sub_engines[state] = sub_engine
            engine = ConicalIntersection(
                M, sub_engines[1], sub_engines[2], meci_sigma, meci_alpha
            )
        return M, engine

    ## Read radii from the command line.
    # Cations should have radii of zero.
    arg_radii = kwargs.get("radii", ["Na", "0.0", "K", "0.0"])
    if (len(arg_radii) % 2) != 0:
        raise RuntimeError("Must have an even number of arguments for radii")
    nrad = int(len(arg_radii) / 2)
    radii = {}
    for i in range(nrad):
        radii[arg_radii[2 * i].capitalize()] = float(arg_radii[2 * i + 1])

    using_qchem = False
    threads_enabled = False
    if engine_str:
        engine_str = engine_str.lower()
        if engine_str[:4] == "tera":
            engine_str = "tera"
        if engine_str not in [
            "tera", "qchem", "psi4", "gmx", "molpro", "openmm", "qcengine", "gaussian",
        ]:
            raise RuntimeError(
                "Valid values of engine are: tera, qchem, psi4, gmx, molpro, openmm, qcengine, gaussian"
            )
        if customengine:
            raise RuntimeError("engine and customengine cannot simultaneously be set")

        if engine_str == "tera":
            logger.info(
                "TeraChem engine selected. Expecting TeraChem input for gradient calculation.\n"
            )
            set_tcenv()
            tcin = load_tcin(inputf)
            # The QM-MM interface is designed on the following ideas:
            # 1) We are only optimizing the QM portion of the system
            #    (until we implement fast inversion of G matrices and Hessians)
            # 2) The geomeTRIC optimizer only "sees" the part of the molecule being optimized.
            # 3) The TeraChem engine writes .rst7 files instead of .xyz files by inserting the
            #    optimization coordinates into the correct locations.
qmmm = "qmindices" in tcin if qmmm: from simtk.openmm.app import AmberPrmtopFile # Need to build a molecule object for the portion of the system being optimized # We rely on OpenMM's AmberPrmtopFile class to read the .prmtop file if not os.path.exists(tcin["coordinates"]): raise RuntimeError("TeraChem QM/MM coordinate file does not exist") if not os.path.exists(tcin["prmtop"]): raise RuntimeError("TeraChem QM/MM prmtop file does not exist") if not os.path.exists(tcin["qmindices"]): raise RuntimeError("TeraChem QM/MM qmindices file does not exist") prmtop_name = tcin["prmtop"] prmtop = AmberPrmtopFile(prmtop_name) M_full = Molecule( tcin["coordinates"], ftype="inpcrd", build_topology=False ) M_full.elem = [a.element.symbol for a in list(prmtop.topology.atoms())] M_full.resid = [a.residue.index for a in list(prmtop.topology.atoms())] qmindices_name = tcin["qmindices"] qmindices = [ int(i.split()[0]) for i in open(qmindices_name).readlines() ] M = M_full.atom_select(qmindices) M.top_settings["radii"] = radii M.top_settings["fragment"] = frag M.build_topology() elif pdb is not None: M = Molecule(pdb, radii=radii, fragment=frag) else: if not os.path.exists(tcin["coordinates"]): raise RuntimeError("TeraChem coordinate file does not exist") M = Molecule(tcin["coordinates"], radii=radii, fragment=frag) M.charge = tcin["charge"] M.mult = tcin.get("spinmult", 1) # The TeraChem engine needs to write rst7 files before calling TC # and also make sure the prmtop and qmindices.txt files are present. engine = TeraChem(M, tcin, dirname=dirname) elif engine_str == "qchem": logger.info( "Q-Chem engine selected. Expecting Q-Chem input for gradient calculation.\n" ) # The file from which we make the Molecule object if pdb is not None: # If we pass the PDB, then read both the PDB and the Q-Chem input file, # then copy the Q-Chem rem variables over to the PDB M = Molecule(pdb, radii=radii, fragment=frag) M1 = Molecule(inputf, radii=radii) for i in ["qctemplate", "qcrems", "elem", "qm_ghost", "charge", "mult"]: if i in M1: M[i] = M1[i] else: M = Molecule(inputf, radii=radii) engine = QChem(M, dirname=dirname, qcdir=qcdir, threads=threads) using_qchem = True threads_enabled = True elif engine_str == "gmx": logger.info( "Gromacs engine selected. Expecting conf.gro, topol.top and shot.mdp (exact names).\n" ) M = Molecule(inputf, radii=radii, fragment=frag) if pdb is not None: M = Molecule(pdb, radii=radii, fragment=frag) if "boxes" in M.Data: del M.Data["boxes"] engine = Gromacs(M) elif engine_str == "openmm": logger.info( "OpenMM engine selected. Expecting forcefield.xml or system.xml file, and PDB passed in via --pdb.\n" ) if pdb is None: raise RuntimeError("Must pass a PDB with option --pdb to use OpenMM.") M = Molecule(pdb, radii=radii, fragment=frag) if "boxes" in M.Data: del M.Data["boxes"] engine = OpenMM(M, pdb, inputf, **Oxygens) elif engine_str == "psi4": logger.info( "Psi4 engine selected. Expecting Psi4 input for gradient calculation.\n" ) engine = Psi4(threads=threads) engine.load_psi4_input(inputf) if pdb is not None: M = Molecule(pdb, radii=radii, fragment=frag) M1 = engine.M for i in ["elem"]: if i in M1: M[i] = M1[i] else: M = engine.M M.top_settings["radii"] = radii threads_enabled = True elif engine_str == "molpro": logger.info( "Molpro engine selected. 
            engine = Molpro(threads=threads)
            engine.load_molpro_input(inputf)
            M = engine.M
            if molproexe is not None:
                engine.set_molproexe(molproexe)
            threads_enabled = True
        elif engine_str == "gaussian":
            logger.info(
                "Gaussian engine selected. Expecting Gaussian input for gradient calculation.\n"
            )
            M = Molecule(inputf, radii=radii, fragment=frag)
            # now work out which gaussian version we have
            if shutil.which("g16") is not None:
                exe = "g16"
            elif shutil.which("g09") is not None:
                exe = "g09"
            else:
                raise ValueError(
                    "Neither g16 nor g09 was found, please check the environment."
                )
            engine = Gaussian(molecule=M, exe=exe, threads=threads)
            threads_enabled = True
            logger.info("The gaussian engine exe is set as %s" % engine.gaussian_exe)
            # load the template into the engine
            engine.load_gaussian_input(inputf)
        elif engine_str == "qcengine":
            logger.info("QCEngine selected.\n")
            schema = kwargs.get("qcschema", False)
            if schema is False:
                raise RuntimeError("QCEngineAPI option requires a QCSchema")
            program = kwargs.get("qce_program", False)
            if program is False:
                raise RuntimeError("QCEngineAPI option requires a qce_program option")
            engine = QCEngineAPI(schema, program)
            M = engine.M
        else:
            raise RuntimeError(
                "Failed to create an engine object, this might be a bug in get_molecule_engine"
            )
    elif customengine:
        logger.info("Custom engine selected.\n")
        engine = customengine
        M = engine.M
    else:
        raise RuntimeError(
            "Neither engine name nor customengine object was provided.\n"
        )

    # If --coords is provided via command line, use final coordinate set in the provided file
    # to override all previously provided coordinates.
    arg_coords = kwargs.get("coords", None)
    if arg_coords is not None:
        M1 = Molecule(arg_coords)
        M1 = M1[-1]
        M.xyzs = M1.xyzs

    # Perform some sanity checks on arguments
    if not using_qchem and qcdir:
        raise EngineError(
            "qcdir keyword argument passed to get_molecule_engine but Q-Chem engine is not being used"
        )
    if threads and not threads_enabled:
        raise RuntimeError(
            "Setting number of threads not configured to work with %s yet" % engine_str
        )

    return M, engine


def one_dimensional_scan(init, final, steps):
    """
    Return a list of N equally spaced values between initial and final.
    This method works with lists of numbers.

    Parameters
    ----------
    init : list
        List of numbers to be interpolated
    final : np.ndarray or list
        List of final numbers, must have same shape as "init"
    steps : int
        Number of interpolation steps

    Returns
    -------
    list
        List of lists that interpolate between init and final, including endpoints.
    """
    if len(init) != len(final):
        raise RuntimeError("init and final must have the same length")
    Answer = []
    for j in range(len(init)):
        Answer.append(np.linspace(init[j], final[j], steps))
    Answer = list([list(i) for i in np.array(Answer).T])
    return Answer


def parse_constraints(molecule, constraints_string):
    """
    Parameters
    ----------
    molecule : Molecule
        Molecule object
    constraints_string : str
        String containing the constraint specification.

    Returns
    -------
    objs : list
        List of primitive internal coordinates corresponding to the constraints
    valgrps : list
        List of lists of constraint values.
        (There are multiple lists when we are scanning)
    """
    mode = None
    Freezes = []
    # The key in this dictionary is for looking up the following information:
    # 1) The classes for creating the primitive coordinates corresponding to the constraint
    # 2) The number of atomic indices that are required to specify the constraint
    ClassDict = {
        "distance": ([Distance], 2),
        "angle": ([Angle], 3),
        "dihedral": ([Dihedral], 4),
        "x": ([CartesianX], 1),
        "y": ([CartesianY], 1),
        "z": ([CartesianZ], 1),
        "xy": ([CartesianX, CartesianY], 1),
        "xz": ([CartesianX, CartesianZ], 1),
        "yz": ([CartesianY, CartesianZ], 1),
        "xyz": ([CartesianX, CartesianY, CartesianZ], 1),
        "trans-x": ([TranslationX], 1),
        "trans-y": ([TranslationY], 1),
        "trans-z": ([TranslationZ], 1),
        "trans-xy": ([TranslationX, TranslationY], 1),
        "trans-xz": ([TranslationX, TranslationZ], 1),
        "trans-yz": ([TranslationY, TranslationZ], 1),
        "trans-xyz": ([TranslationX, TranslationY, TranslationZ], 1),
        "rotation": ([RotationA, RotationB, RotationC], 1),
    }
    AtomKeys = ["x", "y", "z", "xy", "yz", "xz", "xyz"]
    TransKeys = [
        "trans-x", "trans-y", "trans-z", "trans-xy", "trans-yz", "trans-xz", "trans-xyz",
    ]
    objs = []
    vals = []
    coords = molecule.xyzs[0].flatten() * ang2bohr
    in_options = False
    for line in constraints_string.split("\n"):
        # Skip over the options block in the constraints file
        if "$options" in line:
            in_options = True
            logger.info(
                "-> Additional optimizer options provided in the constraints file:\n"
            )
        if in_options:
            if "$end" in line:
                in_options = False
            if len(line) > 0:
                logger.info("-> " + line + "\n")
            continue
        # End skipping over the options block
        line = line.split("#")[0].strip().lower()
        if len(line) == 0:
            continue
        logger.info(line + "\n")
        # This is a list-of-lists. The intention is to create a multidimensional grid
        # of constraint values if necessary.
        if line.startswith("$"):
            mode = line.replace("$", "")
        else:
            if mode is None:
                raise RuntimeError(
                    "Mode ($freeze, $set, $scan) must be set before specifying any constraints"
                )
            s = line.split()
            key = s[0]
            if "".join(sorted(key)) in AtomKeys:
                key = "".join(sorted(key))
            elif "".join(sorted(key.replace("trans-", ""))) in AtomKeys:
                key = "trans-" + "".join(sorted(key.replace("trans-", "")))
            classes, n_atom = ClassDict[key]
            if mode == "freeze":
                ntok = n_atom
            elif mode == "set":
                if key == "rotation":
                    ntok = n_atom + 4
                else:
                    ntok = n_atom + len(classes)
            elif mode == "scan":
                if key == "rotation":
                    ntok = n_atom + 6
                else:
                    ntok = n_atom + 2 * len(classes) + 1
            if len(s) != (ntok + 1):
                raise RuntimeError(
                    "For this line:%s\nExpected %i tokens but got %i"
                    % (line, ntok + 1, len(s))
                )
            if key in AtomKeys or key in TransKeys:
                # Special code that works for atom position and translation constraints.
                if isint(s[1]):
                    atoms = [int(s[1]) - 1]
                elif s[1] in [k.lower() for k in Elements]:
                    atoms = [
                        i for i in range(molecule.na) if molecule.elem[i].lower() == s[1]
                    ]
                else:
                    atoms = uncommadash(s[1])
                if any([i < 0 for i in atoms]):
                    raise RuntimeError("Atom numbers must start from 1")
                if any([i >= molecule.na for i in atoms]):
                    raise RuntimeError(
                        "Constraints refer to higher atom indices than the number of atoms"
                    )
            if key in AtomKeys:
                # The x-coordinate of all the atoms in a group is a
                # list of constraints that is scanned in 1-D.
                for cls in classes:
                    objs.append([cls(a, w=1.0) for a in atoms])
                if mode == "freeze":
                    for cls in classes:
                        vals.append([[None for a in atoms]])
                elif mode == "set":
                    x1 = [float(i) * ang2bohr for i in s[2 : 2 + len(classes)]]
                    for icls, cls in enumerate(classes):
                        vals.append([[x1[icls] for a in atoms]])
                elif mode == "scan":
                    # If we're scanning it, then we add the whole list of distances to the list-of-lists
                    x1 = [float(i) * ang2bohr for i in s[2 : 2 + len(classes)]]
                    x2 = [
                        float(i) * ang2bohr
                        for i in s[2 + len(classes) : 2 + 2 * len(classes)]
                    ]
                    nstep = int(s[2 + 2 * len(classes)])
                    valscan = one_dimensional_scan(x1, x2, nstep)
                    for icls, cls in enumerate(classes):
                        vals.append([[v[icls] for a in atoms] for v in valscan])
            elif key in TransKeys:
                # If there is more than one atom and the mode is "set" or "scan", then the
                # center of mass is constrained, so we pick the corresponding classes.
                if len(atoms) > 1:
                    objs.append(
                        [
                            cls(atoms, w=np.ones(len(atoms)) / len(atoms))
                            for cls in classes
                        ]
                    )
                else:
                    objs.append([cls(atoms[0], w=1.0) for cls in classes])
                if mode == "freeze":
                    # LPW 2016-02-10:
                    # trans-x, trans-y, trans-z is a GROUP of constraints
                    # Each group of constraints gets a [[None, None, None]] appended to vals
                    vals.append([[None for cls in classes]])
                elif mode == "set":
                    # Depending on how many coordinates are constrained, we read in the corresponding
                    # number of constraint values.
                    x1 = [float(i) * ang2bohr for i in s[2 : 2 + len(classes)]]
                    # If there's just one constraint value then we append it to the value list-of-lists
                    vals.append([x1])
                elif mode == "scan":
                    # If we're scanning it, then we add the whole list of distances to the list-of-lists
                    x1 = [float(i) * ang2bohr for i in s[2 : 2 + len(classes)]]
                    x2 = [
                        float(i) * ang2bohr
                        for i in s[2 + len(classes) : 2 + 2 * len(classes)]
                    ]
                    nstep = int(s[2 + 2 * len(classes)])
                    vals.append(one_dimensional_scan(x1, x2, nstep))
            elif key in ["distance", "angle", "dihedral"]:
                if len(classes) != 1:
                    raise RuntimeError("Not OK!")
                atoms = [int(i) - 1 for i in s[1 : 1 + n_atom]]
                if key == "distance" and atoms[0] > atoms[1]:
                    atoms = atoms[::-1]
                if key == "angle" and atoms[0] > atoms[2]:
                    atoms = atoms[::-1]
                if key == "dihedral" and atoms[1] > atoms[2]:
                    atoms = atoms[::-1]
                if any([i < 0 for i in atoms]):
                    raise RuntimeError("Atom numbers must start from 1")
                if any([i >= molecule.na for i in atoms]):
                    raise RuntimeError(
                        "Constraints refer to higher atom indices than the number of atoms"
                    )
                objs.append([classes[0](*atoms)])
                if mode == "freeze":
                    vals.append([[None]])
                elif mode in ["set", "scan"]:
                    if key == "distance":
                        x1 = float(s[1 + n_atom]) * ang2bohr
                    else:
                        x1 = float(s[1 + n_atom]) * np.pi / 180.0
                    if mode == "set":
                        vals.append([[x1]])
                    else:
                        if key == "distance":
                            x2 = float(s[2 + n_atom]) * ang2bohr
                        else:
                            x2 = float(s[2 + n_atom]) * np.pi / 180.0
                        nstep = int(s[3 + n_atom])
                        vals.append([[i] for i in list(np.linspace(x1, x2, nstep))])
            elif key in ["rotation"]:
                # User can only specify ranges of atoms
                atoms = uncommadash(s[1])
                sel = coords.reshape(-1, 3)[atoms, :] * ang2bohr
                sel -= np.mean(sel, axis=0)
                rg = np.sqrt(np.mean(np.sum(sel ** 2, axis=1)))
                if mode == "freeze":
                    for cls in classes:
                        objs.append([cls(atoms, coords, {}, w=rg)])
                        vals.append([[None]])
                elif mode in ["set", "scan"]:
                    objs.append([cls(atoms, coords, {}, w=rg) for cls in classes])
                    # Get the axis
                    u = np.array([float(s[i]) for i in range(2, 5)])
                    u /= np.linalg.norm(u)
                    # Get the angle
                    theta1 = float(s[5]) * np.pi / 180
                    # if np.abs(theta1) > np.pi * 0.9:
                    #     logger.info("Large rotation: Your constraint may not work\n")
work\n") if mode == "set": # Get the periodic image that is inside of the pi-sphere. theta3 = (theta1 + np.pi) * (2 * np.pi) - np.pi c = np.cos(theta3 / 2.0) s = np.sin(theta3 / 2.0) q =
                        q = np.array([c, u[0] * s, u[1] * s, u[2] * s])  # api: numpy.array
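# Quick check (my addition) of one_dimensional_scan defined above: three steps
# between [0, 10] and [1, 20], endpoints included.
#
#     >>> one_dimensional_scan([0.0, 10.0], [1.0, 20.0], 3)
#     [[0.0, 10.0], [0.5, 15.0], [1.0, 20.0]]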
'''It is a slightly modified version of the official implementation of
"Scale-steerable filters for the locally-scale invariant convolutional neural network"

Paper: https://arxiv.org/pdf/1906.03861.pdf
Code: https://github.com/rghosh92/SS-CNN

MIT License
Copyright (c) 2020 <NAME>, <NAME>
'''
import math

import numpy as np
import scipy.ndimage
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter


def generate_filter_basis(filter_size, phi0, sigma, k, scale, phase, drop_rate):
    rot_k = 0
    Mx = (filter_size[0])
    My = (filter_size[1])
    W = np.ones((filter_size[0], filter_size[1]))
    W[np.int((Mx - 1) / 2), np.int((My - 1) / 2)] = 0
    W_dist = scipy.ndimage.morphology.distance_transform_bf(W)
    W_dist[np.int((Mx - 1) / 2)  # api: numpy.int
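# Toy illustration (my addition) of the distance-transform trick above: zeroing
# the centre pixel makes distance_transform_bf return every pixel's distance to
# the filter centre.
#
#     W = np.ones((5, 5))
#     W[2, 2] = 0
#     print(scipy.ndimage.distance_transform_bf(W))  # radial distances from (2, 2)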
import unittest

from boundary_condition import BoundaryCondition
import numpy as np
import eno_tools as eno
from grid import Grid


class TestEnoInterpolation(unittest.TestCase):

    def test_all_orders_exact(self):
        """ Test exactness of ENO interpolations for polynomials.

        ENOp should exactly interpolate polynomials up to degree (p-1), included.
        """
        basedata = np.arange(0., 20., 2.)
        baseref = np.arange(1., 21., 2.)  # api: numpy.arange
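# A small check (my addition, not the eno_tools API) of the exactness property
# in the linear case: 2-point interpolation recovers the midpoints of a
# degree-1 polynomial exactly, matching basedata (even points) and baseref
# (odd points) above.
#
#     midpoints = 0.5 * (basedata[:-1] + basedata[1:])
#     np.allclose(midpoints, baseref[:-1])  # True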
#%% [markdown]
# # k-Nearest Neighbor (kNN) exercise
#
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# The kNN classifier consists of two stages:
#
# - During training, the classifier takes the training data and simply remembers it
# - During testing, kNN classifies every test image by comparing to all training images and transferring the labels of the k most similar training examples
# - The value of k is cross-validated
#
# In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.

#%%
# Run some setup code for this notebook.
import random
import numpy as np
import sys
sys.path.append('/mnt/c/Users/Dude/Documents/JupyterNotebooks/assignment1')
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt

# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

#%%
# Load the raw CIFAR-10 data.
cifar10_dir = '/mnt/c/Users/Dude/Documents/JupyterNotebooks/assignment1/cs231n/datasets/cifar-10-batches-py'

# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
    del X_train, y_train
    del X_test, y_test
    print('Clear previously loaded data.')
except:
    pass

X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

#%%
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
    idxs = np.flatnonzero(y_train == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(X_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()

#%%
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]

num_test = 500
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]

# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print(X_train.shape, X_test.shape)

#%%
from cs231n.classifiers import KNearestNeighbor

# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)

#%% [markdown]
# We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
#
# 1. First we must compute the distances between all test examples and all train examples.
# 2. Given these distances, for each test example we find the k nearest examples and have them vote for the label
#
# Let's begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.
#
# **Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.**
#
# First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.

#%%
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.

# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print(dists.shape)

#%%
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()

#%% [markdown]
# **Inline Question 1**
#
# Notice the structured patterns in the distance matrix, where some rows or columns are visibly brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
#
# - What in the data is the cause behind the distinctly bright rows?
# - What causes the columns?
#
# $\color{blue}{\textit Your Answer:}$ *fill this in.*

#%%
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)

# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))

#%% [markdown]
# You should expect to see approximately `27%` accuracy. Now let's try out a larger `k`, say `k = 5`:

#%%
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))

#%% [markdown]
# You should expect to see a slightly better performance than with `k = 1`.

#%% [markdown]
# **Inline Question 2**
#
# We can also use other distance metrics such as L1 distance.
# For pixel values $p_{ij}^{(k)}$ at location $(i,j)$ of some image $I_k$,
#
# the mean $\mu$ across all pixels over all images is $$\mu=\frac{1}{nhw}\sum_{k=1}^n\sum_{i=1}^{h}\sum_{j=1}^{w}p_{ij}^{(k)}$$
# And the pixel-wise mean $\mu_{ij}$ across all images is
# $$\mu_{ij}=\frac{1}{n}\sum_{k=1}^np_{ij}^{(k)}.$$
# The general standard deviation $\sigma$ and pixel-wise standard deviation $\sigma_{ij}$ is defined similarly.
#
# Which of the following preprocessing steps will not change the performance of a Nearest Neighbor classifier that uses L1 distance? Select all that apply.
# 1. Subtracting the mean $\mu$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu$.)
#    # will not change: offsets both test and train equally
# 2. Subtracting the per pixel mean $\mu_{ij}$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu_{ij}$.)
#    # will not change: offsets both test and train equally
# 3. Subtracting the mean $\mu$ and dividing by the standard deviation $\sigma$.
#    # will change: $\sigma$ scales results
# 4. Subtracting the pixel-wise mean $\mu_{ij}$ and dividing by the pixel-wise standard deviation $\sigma_{ij}$.
#    # will change: $\sigma_{ij}$ scales results
# 5. Rotating the coordinate axes of the data.
#    # will not change
#
# $\color{blue}{\textit Your Answer:}$
# 1,2,5
#
# $\color{blue}{\textit Your Explanation:}$
#

#%%
# Now let's speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)

# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('One loop difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')

#%%
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)

# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print('No loop difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')

#%%
# Let's compare how fast the implementations are
def time_function(f, *args):
    """
    Call a function f with args and return the time (in seconds) that it took to execute.
    """
    import time
    tic = time.time()
    f(*args)
    toc = time.time()
    return toc - tic

two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print('Two loop version took %f seconds' % two_loop_time)

one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print('One loop version took %f seconds' % one_loop_time)

no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print('No loop version took %f seconds' % no_loop_time)

# You should see significantly faster performance with the fully vectorized implementation!
# NOTE: depending on what machine you're using,
# you might not see a speedup when you go from two loops to one loop,
# and might even see a slow-down.

#%% [markdown]
# ### Cross-validation
#
# We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
#%%
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]

X_train_folds = []
y_train_folds = []
################################################################################
# TODO:                                                                        #
# Split up the training data into folds. After splitting, X_train_folds and   #
# y_train_folds should each be lists of length num_folds, where               #
# y_train_folds[i] is the label vector for the points in X_train_folds[i].    #
# Hint: Look up the numpy array_split function.                               #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
X_train_folds = np.array_split(X_train, num_folds)
y_train_folds = np.array_split(y_train, num_folds)  # api: numpy.array_split
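#%%
# A sketch (my addition) of the cross-validation loop that the cell above
# prepares for, using only the classifier methods already introduced in this
# notebook: hold out one fold at a time, train on the rest, and record accuracy.
k_to_accuracies = {}
for k in k_choices:
    k_to_accuracies[k] = []
    for i in range(num_folds):
        X_val, y_val = X_train_folds[i], y_train_folds[i]
        X_tr = np.concatenate(X_train_folds[:i] + X_train_folds[i + 1:])
        y_tr = np.concatenate(y_train_folds[:i] + y_train_folds[i + 1:])
        fold_classifier = KNearestNeighbor()
        fold_classifier.train(X_tr, y_tr)
        fold_dists = fold_classifier.compute_distances_no_loops(X_val)
        y_val_pred = fold_classifier.predict_labels(fold_dists, k=k)
        k_to_accuracies[k].append(np.mean(y_val_pred == y_val))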
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.

.. moduleauthor:: <NAME> <<EMAIL>>
"""

#-----------------------------------------------------------------------------
#       Copyright (C) 2013 The Mosaic Development Team
#
#       Distributed under the terms of the BSD License. The full license is in
#       the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):
    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three integer
                                arrays (rot, tn, td), where rot is the
                                rotation matrix and tn/td are the numerator
                                and denominator of the translation vector.
                                The transformations are defined in fractional
                                coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        self.transposed_rotations = N.array([N.transpose(t[0]) for t in transformations])
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2] for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first element).
                 phase_factor contains the phase factors that must be
                 applied to the structure factor of reflection hkl to
                 obtain the structure factor of the symmetry equivalent
                 reflection.
:rtype: tuple """ hkls = N.dot(self.transposed_rotations, hkl) p = N.multiply.reduce(self.phase_factors**hkl, -1) return hkls, p space_groups = {} transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(1, 'P 1', transformations) space_groups[1] = sg space_groups['P 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(2, 'P -1', transformations) space_groups[2] = sg space_groups['P -1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(3, 'P 1 2 1', transformations) space_groups[3] = sg space_groups['P 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(4, 'P 1 21 1', transformations) space_groups[4] = sg space_groups['P 1 21 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(5, 'C 1 2 1', transformations) space_groups[5] = sg space_groups['C 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(6, 'P 1 m 1', transformations) space_groups[6] = sg space_groups['P 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(7, 'P 1 c 1', transformations) space_groups[7] = sg space_groups['P 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
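# Usage sketch (added for illustration; hypothetical values). The
# space_groups dictionary is only populated below, so this is shown as a
# comment rather than executable module-level code:
#
#   sg = space_groups['P -1']        # same object as space_groups[2]
#   print(len(sg))                   # number of symmetry transformations (2)
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#   # hkls[0] is (1, 2, 3) itself; hkls[1] is the inversion mate (-1, -2, -3).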
space_groups = {}

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(53, 'P m n a', transformations) space_groups[53] = sg space_groups['P m n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(54, 'P c c a', transformations) space_groups[54] = sg space_groups['P c c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(55, 'P b a m', transformations) space_groups[55] = sg space_groups['P b a m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(56, 'P c c n', transformations) space_groups[56] = sg space_groups['P c c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(57, 'P b c m', transformations) space_groups[57] = sg space_groups['P b c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(58, 'P n n m', transformations) space_groups[58] = sg space_groups['P n n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(59, 'P m m n :2', transformations) space_groups[59] = sg space_groups['P m m n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(60, 'P b c n', transformations) space_groups[60] = sg space_groups['P b c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(61, 'P b c a', transformations) space_groups[61] = sg space_groups['P b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(62, 'P n m a', transformations) space_groups[62] = sg space_groups['P n m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
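# From space group 63 (C m c m) onward this block also covers centred
# lattices: each centred group lists every point operation once per lattice
# translation, e.g. twice for C-centring (+(1/2,1/2,0)) or I-centring
# (+(1/2,1/2,1/2)), and four times for F-centring. The centring vector is
# added to the stored translation without reduction modulo 1.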
_register(63, 'C m c m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,-1], [2,2,2]),
])

_register(64, 'C m c a', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,-1], [1,2,2]),
])

_register(65, 'C m m m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
])

_register(66, 'C c c m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
])

_register(67, 'C m m a', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
])

_register(68, 'C c c a :2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
])

_register(69, 'F m m m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
])

_register(70, 'F d d d :2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
])

_register(71, 'I m m m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
])

_register(72, 'I b a m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
])

_register(73, 'I b c a', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
])

_register(74, 'I m m a', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
])

_register(75, 'P 4', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
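# Illustrative check (commented out): every group is indexed twice, so the
# integer number and the Hermann-Mauguin symbol retrieve the same object.
#
#     assert space_groups[75] is space_groups['P 4']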
_register(76, 'P 41', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])

_register(77, 'P 42', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])

_register(78, 'P 43', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])

_register(79, 'I 4', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
])

_register(80, 'I 41', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
])

_register(81, 'P -4', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])

_register(82, 'I -4', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
])

_register(83, 'P 4/m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])

_register(84, 'P 42/m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])

_register(85, 'P 4/n :2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
])

_register(86, 'P 42/n :2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
])

transformations = _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num,
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(87, 'I 4/m', transformations) space_groups[87] = sg space_groups['I 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = 
N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(88, 'I 41/a :2', transformations) space_groups[88] = sg space_groups['I 41/a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(89, 'P 4 2 2', transformations) space_groups[89] = sg space_groups['P 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(90, 'P 4 21 2', transformations) space_groups[90] = sg space_groups['P 4 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(93, 'P 42 2 2', transformations) space_groups[93] = sg space_groups['P 42 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(94, 'P 42 21 2', transformations) space_groups[94] = sg space_groups['P 42 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(95, 'P 43 2 2', transformations) space_groups[95] = sg space_groups['P 43 2 2'] = sg 
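
# Illustrative sketch (not part of the generated table): how one of the
# (rot, trans_num, trans_den) tuples stored above acts on a point given in
# fractional coordinates.  The name _apply_symmetry_op is ours; the code
# assumes only that N is the numeric module imported at the top of this
# file and that it provides a dot product.
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # Rotate, add the fractional translation num/den, and wrap the result
    # back into the unit cell, i.e. into [0, 1) per coordinate.
    rotated = N.dot(rot, point)
    return [(rotated[i] + float(trans_num[i]) / trans_den[i]) % 1.0
            for i in range(3)]
# Example: _apply_symmetry_op(rot, trans_num, trans_den, [0.1, 0.2, 0.3])
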
sg = SpaceGroup(96, 'P 43 21 2', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,3], [2,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [2,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
]))
space_groups[96] = sg
space_groups['P 43 21 2'] = sg

sg = SpaceGroup(97, 'I 4 2 2', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
]))
space_groups[97] = sg
space_groups['I 4 2 2'] = sg

sg = SpaceGroup(98, 'I 41 2 2', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
]))
space_groups[98] = sg
space_groups['I 41 2 2'] = sg

sg = SpaceGroup(99, 'P 4 m m', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]))
space_groups[99] = sg
space_groups['P 4 m m'] = sg

sg = SpaceGroup(100, 'P 4 b m', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
]))
space_groups[100] = sg
space_groups['P 4 b m'] = sg

sg = SpaceGroup(101, 'P 42 c m', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]))
space_groups[101] = sg
space_groups['P 42 c m'] = sg

sg = SpaceGroup(102, 'P 42 n m', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]))
space_groups[102] = sg
space_groups['P 42 n m'] = sg

sg = SpaceGroup(103, 'P 4 c c', _sg_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]))
space_groups[103] = sg
space_groups['P 4 c c'] = sg
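
# Optional sanity check (illustrative, not part of the generated table):
# every crystallographic rotation stored above must have determinant +1
# (proper rotation) or -1 (rotoinversion).  _det3 is our own helper so the
# check does not depend on any linear-algebra submodule of N; the loop
# assumes SpaceGroup keeps its operation list in a `transformations`
# attribute, which may differ in your SpaceGroup implementation.
def _det3(m):
    # Cofactor expansion of a 3x3 matrix along the first row.
    return (m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
            - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
            + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]))

def _check_rotations():
    for checked_sg in set(space_groups.values()):
        for rot, num, den in checked_sg.transformations:
            assert _det3(rot) in (1, -1)
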
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(104, 'P 4 n c', transformations) space_groups[104] = sg space_groups['P 4 n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(105, 'P 42 m c', transformations) space_groups[105] = sg space_groups['P 42 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(106, 'P 42 b c', transformations) space_groups[106] = sg space_groups['P 42 b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(107, 'I 4 m m', transformations) space_groups[107] = sg space_groups['I 4 m m'] = sg transformations = 
[] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(108, 'I 4 c m', transformations) space_groups[108] = sg space_groups['I 4 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(109, 'I 41 m d', transformations) space_groups[109] = sg space_groups['I 41 m d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(110, 'I 41 c d', transformations) space_groups[110] = sg space_groups['I 41 c d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(111, 'P -4 2 m', transformations) space_groups[111] = sg space_groups['P -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
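# Each entry in `transformations` is a (rot, trans_num, trans_den) triple:
# a 3x3 integer rotation matrix plus an exact fractional translation stored
# as an integer numerator and denominator, describing the operation
# x' = rot . x + trans_num / trans_den in fractional coordinates.
# A minimal sketch of applying one triple to a fractional point
# (illustrative only; `_apply_symop` is not part of this module's API):
#
#     def _apply_symop(rot, trans_num, trans_den, point):
#         return (N.dot(rot, point) + N.array(trans_num, 'd') / trans_den) % 1.0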
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
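# Note: the body-centred groups above (Nos. 119-122, Hermann-Mauguin symbols
# beginning with 'I') list every operation twice, the second copy with the
# centring translation (1/2, 1/2, 1/2) added, which is why their
# transformation lists are twice as long as those of the primitive ('P')
# groups.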
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
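# Symbols carrying a ':2' suffix (e.g. 'P 4/n b m :2') are tabulated in
# origin choice 2 of the International Tables for Crystallography, which
# places the origin on an inversion centre; the extra fractional shifts in
# those groups come from that origin convention.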
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
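# Each group is registered twice, under its International Tables number and
# under its Hermann-Mauguin symbol, so either key retrieves the same
# SpaceGroup instance. Usage sketch (assuming the `space_groups` dict is
# exposed by this module as-is):
#
#     sg = space_groups[138]
#     sg = space_groups['P 42/n c m :2']   # same object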
= N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(139, 'I 4/m m m', transformations) space_groups[139] = sg space_groups['I 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(140, 'I 4/m c m', transformations) space_groups[140] = sg space_groups['I 4/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(141, 'I 41/a m d :2', transformations) space_groups[141] = sg space_groups['I 41/a m d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(142, 'I 41/a c d :2', transformations) space_groups[142] = sg space_groups['I 41/a c d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(143, 'P 3', transformations) space_groups[143] = sg space_groups['P 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(144, 'P 31', transformations) space_groups[144] = sg space_groups['P 31'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(145, 'P 32', transformations) space_groups[145] = sg space_groups['P 32'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) 
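# A minimal usage sketch (illustrative only, not part of the generated table):
# each operation tuple (rot, trans_num, trans_den) maps a fractional
# coordinate x to rot*x + trans_num/trans_den.  The helper name below is an
# assumption made for demonstration, not an API defined elsewhere in this file.
def _apply_symmetry_op(operation, x):
    # Unpack the stored tuple and apply the rotation plus the rational
    # translation to a length-3 fractional-coordinate array.
    rot, trans_num, trans_den = operation
    return N.dot(rot, x) + trans_num * 1.0 / trans_den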
transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
]
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
]
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    _op([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    _op([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,3]),
    _op([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,2], [1,1,3]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    _op([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    _op([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    _op([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
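# Example lookups into the table built above (both key styles work, as the
# paired assignments show):
#     space_groups[152]            # by International Tables number
#     space_groups['P 31 2 1']     # by Hermann-Mauguin symbol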
transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    _op([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
]
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    _op([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    _op([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
]
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    _op([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,0,1,0,0,0,1], [1,2,7], [3,3,6]),
    _op([1,0,0,1,-1,0,0,0,1], [1,2,7], [3,3,6]),
    _op([0,-1,0,-1,0,0,0,0,1], [1,2,7], [3,3,6]),
    _op([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,0,1,0,0,0,1], [2,1,5], [3,3,6]),
    _op([1,0,0,1,-1,0,0,0,1], [2,1,5], [3,3,6]),
    _op([0,-1,0,-1,0,0,0,0,1], [2,1,5], [3,3,6]),
]
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    _op([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    _op([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    _op([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    _op([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
]
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg

transformations = [
    _op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    _op([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    _op([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    _op([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    _op([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    _op([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    _op([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    _op([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    _op([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    _op([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
]
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg

transformations = []
transformations.append(_op([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]))
transformations.append(_op([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]))
transformations.append(_op([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]))
transformations.append(_op([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]))
transformations.append(_op([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]))
transformations.append(_op([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]))
transformations.append(_op([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]))
transformations.append(_op([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]))
transformations.append(_op([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]))
transformations.append(_op([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]))
transformations.append(_op([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den
= N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(167, 'R -3 c :H', transformations) space_groups[167] = sg space_groups['R -3 c :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(168, 'P 6', transformations) space_groups[168] = sg space_groups['P 6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(169, 'P 61', transformations) space_groups[169] = sg space_groups['P 61'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(170, 'P 65', transformations) space_groups[170] = sg space_groups['P 65'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(171, 'P 62', transformations) space_groups[171] = sg space_groups['P 62'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(172, 'P 64', transformations) space_groups[172] = sg space_groups['P 64'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(173, 'P 63', transformations) space_groups[173] = sg space_groups['P 63'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(174, 'P -6', transformations) space_groups[174] = sg space_groups['P -6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num =
N.array([0,0,0])
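# A minimal usage sketch (hedged): apply one registered operation -- a
# rotation plus the fractional translation trans_num/trans_den -- to a
# point. Only the triples assembled above are assumed; any richer
# SpaceGroup API is not shown in this file and is not relied upon here.
if __name__ == '__main__':
    rot = N.array([1,-1,0,1,0,0,0,0,1])
    rot.shape = (3, 3)
    x = N.array([0.1, 0.2, 0.3])
    # The 6_1 screw of 'P 61': rotate, then translate by (0, 0, 1/6).
    x_image = N.dot(rot, x) + N.array([0., 0., 1.]) / N.array([1., 1., 6.])
    print(space_groups['P 61'], x_image)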
#!/usr/bin/env python
# Copyright (2021) by <NAME>
"""Implementation of Donald Knuth's Dancing Links algorithm.

See https://www-cs-faculty.stanford.edu/~knuth/programs/dance.w
"""
import sys
import time

import numpy as np
from nptyping import NDArray

from exact_cover_problem import ExactCoverProblem, io_read_prob_matrix


class Node:
    def __init__(self, val=None):
        self.val = val
        self.L = self
        self.R = self
        self.U = self
        self.D = self
        self.C = self

    def __repr__(self):
        return repr(self.val)


class DLX(ExactCoverProblem):

    class NodeIterator:
        def __init__(self, matrix: NDArray, start: Node, stop: Node, nextf):
            self.matrix = matrix
            self.start = start
            self.stop = stop
            self.nextf = nextf
            self.node_iter = start

        def __iter__(self):
            return self

        def __next__(self):
            cur_iter = self.node_iter
            self.node_iter = self.nextf(self.node_iter)
            if cur_iter == self.stop:
                raise StopIteration()
            return cur_iter

    # --------------------
    # Initialization
    # --------------------
    def __init__(self, name, matrix: NDArray, do_prioritize_columns=True):
        self.name = name
        self.matrix = matrix
        self.do_prioritize_columns = do_prioritize_columns
        self.root = Node()
        self.solution = None  # Preserved across recursive calls to search()
        self.solutions = None
        col_count = matrix.shape[1]
        self._init_col_hdrs(col_count)
        for row_index, row in enumerate(matrix):
            self._init_row(row, row_index)

    def _get_node(self, col_id, row_index):
        # Create a node for (row_index, col_id) and splice it in at the
        # bottom of its column's circular doubly linked list.
        node = Node(row_index)
        col_hdr = self.col_hdrs[col_id]
        col_hdr.size += 1
        node.D = col_hdr
        node.U = col_hdr.U
        col_hdr.U.D = node
        col_hdr.U = node
        node.C = col_hdr
        return node

    def _init_col_hdrs(self, col_count):
        self.col_hdrs = [Node(k) for k in range(col_count)]
        for col_hdr in self.col_hdrs:
            col_hdr.size = 0
        hdr_iter = self.root  # Start at root
        for k in range(col_count):
            hdr_iter.R = self.col_hdrs[k]
            self.col_hdrs[k].L = hdr_iter
            hdr_iter = self.col_hdrs[k]  # Move to next col header
        # Make column header DLL circular
        if self.col_hdrs:
            self.col_hdrs[-1].R = self.root
            self.root.L = self.col_hdrs[-1]

    def _init_row(self, row, row_index):
        nonzero_indices = np.nonzero(row)[0]
        if nonzero_indices.size == 0:
            return
        first_node = self._get_node(nonzero_indices[0], row_index)
        node_iter = first_node
        for col_id in nonzero_indices[1:]:
            node = self._get_node(col_id, row_index)
            node.L = node_iter
            node_iter.R = node
            node_iter = node
        # Close the row into a circular doubly linked list
        node_iter.R = first_node
        first_node.L = node_iter
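# A short construction sketch, kept hedged: the 6x7 matrix below is the
# exact-cover example from Knuth's dancing-links paper (its unique solution
# is rows 0, 3 and 4). Only the __init__ behaviour defined above is
# exercised; the search routine lies outside this excerpt, so nothing is
# solved here.
if __name__ == "__main__":
    example = np.array([
        [0, 0, 1, 0, 1, 1, 0],
        [1, 0, 0, 1, 0, 0, 1],
        [0, 1, 1, 0, 0, 1, 0],
        [1, 0, 0, 1, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 1],
        [0, 0, 0, 1, 1, 0, 1],
    ])
    prob = DLX("knuth-example", example)
    # Each column header tracks how many rows cover that column.
    print([c.size for c in prob.col_hdrs])  # [2, 2, 2, 3, 2, 2, 3]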
# Author: <NAME> <jtprice at cs.unc.edu>

import numpy as np

#-------------------------------------------------------------------------------
#
# Axis-Angle Functions
#
#-------------------------------------------------------------------------------

# returns the cross product matrix representation of a 3-vector v
def cross_prod_matrix(v):
    return np.array(((0., -v[2], v[1]),
                     (v[2], 0., -v[0]),
                     (-v[1], v[0], 0.)))

#-------------------------------------------------------------------------------
# www.euclideanspace.com/maths/geometry/rotations/conversions/angleToMatrix/
# if angle is None, assume ||axis|| == angle, in radians
# if angle is not None, assume that axis is a unit vector
def axis_angle_to_rotation_matrix(axis, angle=None):
    if angle is None:
        angle = np.linalg.norm(axis)
        if np.abs(angle) > np.finfo('float').eps:
            axis = axis / angle

    cp_axis = cross_prod_matrix(axis)
    return np.eye(3) + (np.sin(angle) * cp_axis +
                        (1. - np.cos(angle)) * cp_axis.dot(cp_axis))

#-------------------------------------------------------------------------------
# after some deliberation, I've decided the easiest way to do this is to use
# quaternions as an intermediary
def rotation_matrix_to_axis_angle(R):
    return Quaternion.FromR(R).ToAxisAngle()

#-------------------------------------------------------------------------------
#
# Quaternion
#
#-------------------------------------------------------------------------------

class Quaternion:
    # create a quaternion from an existing rotation matrix
    # euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
    @staticmethod
    def FromR(R):
        trace = np.trace(R)

        if trace > 0:
            qw = 0.5 * np.sqrt(1. + trace)
            qx = (R[2, 1] - R[1, 2]) * 0.25 / qw
            qy = (R[0, 2] - R[2, 0]) * 0.25 / qw
            qz = (R[1, 0] - R[0, 1]) * 0.25 / qw
        elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
            s = 2. * np.sqrt(1. + R[0, 0] - R[1, 1] - R[2, 2])
            qw = (R[2, 1] - R[1, 2]) / s
            qx = 0.25 * s
            qy = (R[0, 1] + R[1, 0]) / s
            qz = (R[0, 2] + R[2, 0]) / s
        elif R[1, 1] > R[2, 2]:
            s = 2. * np.sqrt(1. + R[1, 1] - R[0, 0] - R[2, 2])
            qw = (R[0, 2] - R[2, 0]) / s
            qx = (R[0, 1] + R[1, 0]) / s
            qy = 0.25 * s
            qz = (R[1, 2] + R[2, 1]) / s
        else:
            s = 2. * np.sqrt(1. + R[2, 2] - R[0, 0] - R[1, 1])
            qw = (R[1, 0] - R[0, 1]) / s
            qx = (R[0, 2] + R[2, 0]) / s
            qy = (R[1, 2] + R[2, 1]) / s
            qz = 0.25 * s

        return Quaternion(np.array((qw, qx, qy, qz)))

    # if angle is None, assume ||axis|| == angle, in radians
    # if angle is not None, assume that axis is a unit vector
    @staticmethod
    def FromAxisAngle(axis, angle=None):
        if angle is None:
            angle = np.linalg.norm(axis)
            if np.abs(angle) > np.finfo('float').eps:
                axis = axis / angle

        qw = np.cos(0.5 * angle)
        axis = axis * np.sin(0.5 * angle)

        return Quaternion(np.array((qw, axis[0], axis[1], axis[2])))

    #---------------------------------------------------------------------------

    def __init__(self, q=np.array((1., 0., 0., 0.))):
        if isinstance(q, Quaternion):
            self.q = q.q.copy()
        else:
            q = np.asarray(q)
            if q.size == 4:
                self.q = q.copy()
            elif q.size == 3:  # convert from a 3-vector to a quaternion
                self.q = np.empty(4)
                self.q[0], self.q[1:] = 0., q.ravel()
            else:
                raise Exception('Input quaternion should be a 3- or 4-vector')

    def __add__(self, other):
        return Quaternion(self.q + other.q)

    def __iadd__(self, other):
        self.q += other.q
        return self

    # conjugation via the ~ operator
    def __invert__(self):
        return Quaternion(
            np.array((self.q[0], -self.q[1], -self.q[2], -self.q[3])))

    # returns: self.q * other.q if other is a Quaternion; otherwise performs
    # scalar multiplication
    def __mul__(self, other):
        if isinstance(other, Quaternion):  # quaternion multiplication
            return Quaternion(np.array((
                self.q[0] * other.q[0] - self.q[1] * other.q[1] -
                self.q[2] * other.q[2] - self.q[3] * other.q[3],
                self.q[0] * other.q[1] + self.q[1] * other.q[0] +
                self.q[2] * other.q[3] - self.q[3] * other.q[2],
                self.q[0] * other.q[2] - self.q[1] * other.q[3] +
                self.q[2] * other.q[0] + self.q[3] * other.q[1],
                self.q[0] * other.q[3] + self.q[1] * other.q[2] -
                self.q[2] * other.q[1] + self.q[3] * other.q[0])))
        else:  # scalar multiplication (assumed)
            return Quaternion(other * self.q)

    def __rmul__(self, other):
        return self * other

    def __imul__(self, other):
        self.q[:] = (self * other).q
        return self

    def __irmul__(self, other):
        self.q[:] = (self * other).q
        return self

    def __neg__(self):
        return Quaternion(-self.q)

    def __sub__(self, other):
        return Quaternion(self.q - other.q)

    def __isub__(self, other):
        self.q -= other.q
        return self

    def __str__(self):
        return str(self.q)

    def copy(self):
        return Quaternion(self)

    def dot(self, other):
        return self.q.dot(other.q)

    # assume the quaternion is nonzero!
    def inverse(self):
        return Quaternion((~self).q / self.q.dot(self.q))

    def norm(self):
        return np.linalg.norm(self.q)

    def normalize(self):
        self.q /= np.linalg.norm(self.q)
        return self

    # assume x is a Nx3 numpy array or a numpy 3-vector
    def rotate_points(self, x):
        x = np.atleast_2d(x)
        return x.dot(self.ToR().T)

    # convert to a rotation matrix
    def ToR(self):
        return np.eye(3) + 2 * np.array((
            (-self.q[2] * self.q[2] - self.q[3] * self.q[3],
             self.q[1] * self.q[2] - self.q[3] * self.q[0],
             self.q[1] * self.q[3] + self.q[2] * self.q[0]),
            (self.q[1] * self.q[2] + self.q[3] * self.q[0],
             -self.q[1] * self.q[1] - self.q[3] * self.q[3],
             self.q[2] * self.q[3] - self.q[1] * self.q[0]),
            (self.q[1] * self.q[3] - self.q[2] * self.q[0],
             self.q[2] * self.q[3] + self.q[1] * self.q[0],
             -self.q[1] * self.q[1] - self.q[2] * self.q[2])))

    # convert to axis-angle representation, with angle encoded by the length
    def ToAxisAngle(self):
        # recall that for axis-angle representation (a, angle), with "a" unit:
        #   q = (cos(angle/2), a * sin(angle/2))
        # below, for readability, "theta" actually means half of the angle
        sin_sq_theta = self.q[1:].dot(self.q[1:])

        # if theta is non-zero, then we can compute a unique rotation
        if np.abs(sin_sq_theta) > np.finfo('float').eps:
            sin_theta = np.sqrt(sin_sq_theta)
            cos_theta = self.q[0]
            # atan2 keeps the recovered angle in (-pi, pi]; q and -q encode
            # the same rotation, so flip signs when the scalar part is negative
            theta = (np.arctan2(-sin_theta, -cos_theta) if cos_theta < 0.
                     else np.arctan2(sin_theta, cos_theta))
            return 2. * theta / sin_theta * self.q[1:]

        # otherwise, the rotation is (numerically) the identity
        return np.zeros(3)
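# A small self-check sketch using only the methods defined above: round-trip
# an axis-angle rotation through a quaternion and a rotation matrix, then
# rotate a point. The values in the comments are what the math predicts.
if __name__ == '__main__':
    aa = np.array([0., 0., 0.5 * np.pi])  # 90 degrees about +z
    q = Quaternion.FromAxisAngle(aa)
    R = q.ToR()
    print(q.rotate_points(np.array([1., 0., 0.])))  # ~[[0., 1., 0.]]
    print(Quaternion.FromR(R).ToAxisAngle())        # ~[0., 0., pi/2]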
import numpy as np import matplotlib.pyplot as plt import ipywidgets as widgets import html import matplotlib.patches as patches from matplotlib.colors import SymLogNorm import astropy.units as u from .crisp import CRISP, CRISPSequence, CRISPWidebandSequence, CRISPNonU, CRISPNonUSequence from .inversions import Inversion from .utils import CRISP_sequence_constructor from matplotlib import ticker import matplotlib.patheffects as PathEffects from matplotlib.lines import Line2D from astropy.wcs.wcsapi import SlicedLowLevelWCS from .utils import pt_bright_cycler from IPython.core.display import display from matplotlib.dates import date2num, DateFormatter class SpectralViewer: """ Imaging spectroscopic viewer. SpectralViewer should be used when one wants to click on points of an image and have the spectrum displayed for that point. This works **exclusively** in Jupyter notebook but can be a nice data exploration tool. This viewer utilises the data structures defined in `crispy.crisp` and has many variable options. :param data: The data to explore, this can be either one or two spectral lines (support for more than two can be added if required). This is the only required argument to view the data. :type data: str or list or CRISP or CRISPSequence or CRISPNonU or CRISPNonUSequence :param wcs: A prescribed world coordinate system. If None, the world coordinate system is derived from the data. Default is None. :type wcs: astropy.wcs.WCS or None, optional :param uncertainty: The uncertainty in the intensity values of the data. Default is None. :type uncertainty: numpy.ndarray or None, optional :param mask: A mask to be used on the data. Default is None. :type mask: numpy.ndarray or None, optional :param nonu: Whether or not the spectral axis is non-uniform. Default is False. :type nonu: bool, optional :cvar coords: The coordinates selected to produce spectra. :type coords: list[tuple] :cvar px_coords: The coordinates selected to produce spectra in pixel space. This is important for indexing the data later to get the correct spectra. :type px_coords: list[tuple] :cvar shape_type: The spectra can be selected for a single point or for a box with specified dimensions with top-left corner where the user clicks. This attribute tells the user which point is described by which shape. 
:type shape_type: list[str] """ def __init__(self, data, wcs=None, uncertainty=None, mask=None, nonu=False): plt.style.use("bmh") self.aa = html.unescape("&#8491;") self.l = html.unescape("&lambda;") self.a = html.unescape("&alpha;") self.D = html.unescape("&Delta;") shape = widgets.Dropdown(options=["point", "box"], value="point", description="Shape: ") if not nonu: if type(data) == str: self.cube = CRISP(filename=data, wcs=wcs, uncertainty=uncertainty, mask=mask) if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == list: data = CRISP_sequence_constructor(data, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube = CRISPSequence(data) if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom elif type(data) == CRISP: self.cube = data if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == CRISPSequence: self.cube = data if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom else: if type(data) == str: self.cube = CRISPNonU(filename=data, wcs=wcs, uncertainty=uncertainty, mask=mask) if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == list: data = CRISP_sequence_constructor(data, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube = CRISPNonUSequence(data) if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom elif type(data) == CRISPNonU: self.cube = data if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == CRISPNonUSequence: self.cube = data if 
self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom if type(self.cube) == CRISP or type(self.cube) == CRISPNonU: self.fig = plt.figure(figsize=(8,10)) try: self.ax1 = self.fig.add_subplot(1, 2, 1, projection=self.cube.wcs.dropaxis(-1)) except: self.ax1 = self.fig.add_subplot(1, 2, 1, projection=SlicedLowLevelWCS(self.cube[0].wcs.low_level_wcs, 0)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax2 = self.fig.add_subplot(1, 2, 2) self.ax2.yaxis.set_label_position("right") self.ax2.yaxis.tick_right() self.ax2.set_ylabel("I [DNs]") self.ax2.set_xlabel(f"{self.l} [{self.aa}]") self.ax2.tick_params(direction="in") ll = widgets.SelectionSlider(options=[np.round(l - np.median(self.wvls), decimals=2).value for l in self.wvls], description = f"{self.D} {self.l} [{self.aa}]") out1 = widgets.interactive_output(self._img_plot1, {"ll" : ll}) out2 = widgets.interactive_output(self._shape, {"opts" : shape}) display(widgets.HBox([ll, shape])) elif type(self.cube) == CRISPSequence or type(self.cube) == CRISPNonUSequence: self.fig = plt.figure(figsize=(8,10)) try: self.ax1 = self.fig.add_subplot(2, 2, 1, projection=self.cube.list[0].wcs.dropaxis(-1)) except: self.ax1 = self.fig.add_subplot(2, 2, 1, projection=SlicedLowLevelWCS(self.cube.list[0][0].wcs.low_level_wcs, 0)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax1.xaxis.set_label_position("top") self.ax1.xaxis.tick_top() try: self.ax2 = self.fig.add_subplot(2, 2, 3, projection=self.cube.list[0].wcs.dropaxis(-1)) except: self.ax2 = self.fig.add_subplot(2, 2, 3, projection=SlicedLowLevelWCS(self.cube.list[0][0].wcs.low_level_wcs, 0)) self.ax2.set_ylabel("Helioprojective Latitude [arcsec]") self.ax2.set_xlabel("Helioprojective Longitude [arcsec]") self.ax3 = self.fig.add_subplot(2, 2, 2) self.ax3.yaxis.set_label_position("right") self.ax3.yaxis.tick_right() self.ax3.set_ylabel("Intensity [DNs]") self.ax3.set_xlabel(f"{self.l} [{self.aa}]") self.ax3.xaxis.set_label_position("top") self.ax3.xaxis.tick_top() self.ax3.tick_params(direction="in") self.ax4 = self.fig.add_subplot(2, 2, 4) self.ax4.yaxis.set_label_position("right") self.ax4.yaxis.tick_right() self.ax4.set_ylabel("Intensity [DNs]") self.ax4.set_xlabel(f"{self.l} [{self.aa}]") self.ax4.tick_params(direction="in") ll1 = widgets.SelectionSlider( options=[np.round(l - np.median(self.wvls1), decimals=2).value for l in self.wvls1], description=fr"{self.D} {self.l}$_{1}$ [{self.aa}]", style={"description_width" : "initial"} ) ll2 = widgets.SelectionSlider( options=[np.round(l - np.median(self.wvls2), decimals=2).value for l in self.wvls2], description=fr"{self.D} {self.l}$_{2}$ [{self.aa}]", style={"description_width" : "initial"} ) out1 = widgets.interactive_output(self._img_plot2, {"ll1" : ll1, "ll2" : ll2}) out2 = widgets.interactive_output(self._shape, {"opts" : shape}) display(widgets.HBox([widgets.VBox([ll1, ll2]), shape])) self.coords = [] self.px_coords = [] self.shape_type = [] 
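# box_coords records clicked boxes on a grid of box-sized cells so duplicate selections can be detected; colour_idx cycles through pt_bright_cycler and n counts completed cycles (together they pick distinct colours and plot markers).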
self.box_coords = [] self.colour_idx = 0 self.n = 0 self.receiver = self.fig.canvas.mpl_connect("button_press_event", self._on_click) try: x = widgets.IntText(value=1, min=1, max=self.cube.shape[-1], description="x [pix]") y = widgets.IntText(value=1, min=1, max=self.cube.shape[-2], description="y [pix]") except: x = widgets.IntText(value=1, min=1, max=self.cube.list[0].shape[-1], description="x [pix]") y = widgets.IntText(value=1, min=1, max=self.cube.list[0].shape[-2], description="y [pix]") outx = widgets.interactive_output(self._boxx, {"x" : x}) outy = widgets.interactive_output(self._boxy, {"y" : y}) display(widgets.HBox([x, y])) done_button = widgets.Button(description="Done") done_button.on_click(self._disconnect_matplotlib) clear_button = widgets.Button(description="Clear") clear_button.on_click(self._clear) save_button = widgets.Button(description="Save") save_button.on_click(self._save) display(widgets.HBox([done_button, clear_button, save_button])) widgets.interact(self._file_name, fn= widgets.Text(description="Filename to save as: ", style={"description_width" : "initial"}, layout=widgets.Layout(width="50%"))) def _on_click(self, event): if self.fig.canvas.manager.toolbar.mode != "": return if type(self.cube) == CRISP or type(self.cube) == CRISPNonU: if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) self.px_coords.append(centre_coord) self.shape_type.append("point") circ = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) px = self.cube.to_lonlat(*centre_coord) << u.arcsec if self.cube.file.data.ndim == 3: self.ax2.plot(self.wvls, self.cube.file.data[:, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube.file.data.ndim == 4: self.ax2.plot(self.wvls, self.cube.file.data[0, :, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax2.legend() self.coords.append(px) self.colour_idx += 1 self.fig.canvas.draw() elif self.shape == "box": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 box_anchor = int(event.ydata), int(event.xdata) self.px_coords.append(box_anchor) self.shape_type.append("box") # obtain the coordinates of the box on a grid with pixels the size of the box to make sure there is not copies of the same box box_coord = box_anchor[0] // self.boxy, box_anchor[1] // self.boxx if box_coord in self.box_coords: coords = [p.get_xy() for p in self.ax1.patches] for p in self.ax1.patches: if p.get_xy() == box_anchor: p.remove() idx = self.box_coords.index(box_coord) del self.box_coords[idx] del self.px_coords[idx] del self.shape_type[idx] del self.coords[idx] return self.coords.append(self.cube.to_lonlat(*box_anchor) << u.arcsec) rect = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, 
linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) self.ax1.add_patch(rect) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt = self.ax1.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) if self.cube.file.data.ndim == 3: self.ax2.plot(self.wvls, np.mean(self.cube.file.data[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube.file.data.ndim == 4: self.ax2.plot(self.wvls, np.mean(self.cube.file.data[0, :,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax2.legend() self.colour_idx += 1 self.fig.canvas.draw() elif type(self.cube) == CRISPSequence or type(self.cube) == CRISPNonUSequence: if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) #with WCS, the event data is returned in pixels so we don't need to do the conversion from real world but rather to real world later on self.px_coords.append(centre_coord) circ1 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) circ2 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ1) self.ax2.add_patch(circ2) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt_1 = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_2 = self.ax2.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt_2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) px = self.cube.list[0].to_lonlat(*centre_coord) << u.arcsec if self.cube.list[0].file.data.ndim == 3: self.ax3.plot(self.wvls1, self.cube.list[0].file.data[:, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube.list[0].file.data.ndim == 4: self.ax3.plot(self.wvls1, self.cube.list[0].file.data[0, :, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) if self.cube.list[1].file.data.ndim == 3: self.ax4.plot(self.wvls2, self.cube.list[1].file.data[:, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", 
c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube.list[1].file.data.ndim == 4: self.ax4.plot(self.wvls2, self.cube.list[1].file.data[0, :, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.legend() self.ax4.legend() self.coords.append(px) self.colour_idx += 1 self.fig.canvas.draw() elif self.shape == "box": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 box_anchor = int(event.ydata), int(event.xdata) self.px_coords.append(box_anchor) self.shape_type.append("box") # obtain the coordinates of the box on a grid with pixels the size of the box to make sure there is not copies of the same box box_coord = box_anchor[0] // self.boxy, box_anchor[1] // self.boxx if box_coord in self.box_coords: coords = [p.get_xy() for p in self.ax.patches] for p in self.ax.patches: if p.get_xy() == box_anchor: p.remove() idx = self.box_coords.index(box_coord) del self.box_coords[idx] del self.px_coords[idx] del self.shape_type[idx] del self.coords[idx] return self.coords.append(self.cube.to_lonlat(*box_anchor) << u.arcsec) rect1 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) rect2 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) self.ax1.add_patch(rect1) self.ax2.add_patch(rect2) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt1 = self.ax1.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt2 = self.ax2.text(box_anchor[1]-50, box_anchor[0]-1, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt2.set_path_effect([PathEffects.withStroke(linewidth=3, foreground="k")]) if self.cube.list[0].file.data.ndim == 3: self.ax3.plot(self.wvls1, np.mean(self.cube.list[0].file.data[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube.list[0].file.data.ndim == 4: self.ax3.plot(self.wvls1, np.mean(self.cube.list[0].file.data[0, :,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) if self.cube.list[1].file.data.ndim == 3: self.ax4.plot(self.wvls2, np.mean(self.cube.list[1].file.data[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube.list[1].file.data.ndim == 4: self.ax4.plot(self.wvls2, np.mean(self.cube.list[1].file.data[0, 
:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.legend() self.ax4.legend() self.colour_idx += 1 self.fig.canvas.draw() def _shape(self, opts): self.shape = opts def _boxx(self, x): self.boxx = x def _boxy(self, y): self.boxy = y def _disconnect_matplotlib(self, _): self.fig.canvas.mpl_disconnect(self.receiver) def _clear(self, _): self.coords = [] self.px_coords = [] self.shape_type = [] self.box_coords = [] self.colour_idx = 0 self.n = 0 if type(self.cube) == CRISP: while len(self.ax1.patches) > 0: for p in self.ax1.patches: p.remove() while len(self.ax1.texts) > 0: for t in self.ax1.texts: t.remove() self.ax2.clear() self.ax2.set_ylabel("Intensity [DNs]") self.ax2.set_xlabel(f"{self.l} [{self.aa}]") self.fig.canvas.draw() self.fig.canvas.flush_events() else: while len(self.ax1.patches) > 0: for p in self.ax1.patches: p.remove() while len(self.ax2.patches) > 0: for p in self.ax2.patches: p.remove() while len(self.ax1.texts) > 0: for t in self.ax1.texts: t.remove() while len(self.ax2.texts) > 0: for t in self.ax2.texts: t.remove() self.ax3.clear() self.ax3.set_ylabel("Intensity [DNs]") self.ax3.set_xlabel(f"{self.l} [{self.aa}]") self.ax4.clear() self.ax4.set_ylabel("Intensity [DNs]") self.ax4.set_xlabel(f"{self.l} [{self.aa}]") self.fig.canvas.draw() self.fig.canvas.flush_events() def _save(self, _): self.fig.savefig(self.filename, dpi=300) def _file_name(self, fn): self.filename = fn def _img_plot1(self, ll): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar != None: self.ax1.images[-1].colorbar.remove() ll_idx = int(np.where(np.round(self.wvls, decimals=2).value == np.round(np.median(self.wvls).value + ll, decimals=2))[0]) try: data = self.cube.file.data[ll_idx].astype(np.float) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") except: data = self.cube.file.data[0, ll_idx].astype(np.float) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") try: el = self.cube.file.header["WDESC1"] except KeyError: el = self.cube.file.header["element"] self.ax1.set_title(fr"{el} {self.aa} {self.D} {self.l}$_{1}$ = {ll} {self.aa}") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label="Intensity [DNs]") def _img_plot2(self, ll1, ll2): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar != None: self.ax1.images[-1].colorbar.remove() if self.ax2.images == []: pass elif self.ax2.images[-1].colorbar != None: self.ax2.images[-1].colorbar.remove() ll1_idx = int(np.where(np.round(self.wvls1, decimals=2).value == np.round(np.median(self.wvls1).value + ll1, decimals=2))[0]) ll2_idx = int(np.where(np.round(self.wvls2, decimals=2).value == np.round(np.median(self.wvls2).value + ll2, decimals=2))[0]) try: data = self.cube.list[0].file.data[ll1_idx].astype(np.float) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") except: data = self.cube.list[0].file.data[0, ll1_idx].astype(np.float) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") try: data = self.cube.list[1].file.data[ll2_idx].astype(np.float) data[data < 0] = np.nan im2 = self.ax2.imshow(data, cmap="Greys_r") except: data = self.cube.list[1].file.data[0, ll2_idx].astype(np.float) data[data < 0] = np.nan im2 = self.ax2.imshow(data, cmap="Greys_r") try: el1 = self.cube.list[0].file.header["WDESC1"] el2 = 
self.cube.list[1].file.header["WDESC1"] except KeyError: el1 = self.cube.list[0].file.header["element"] el2 = self.cube.list[1].file.header["element"] self.ax1.set_title(fr"{el1} {self.aa} {self.D} {self.l}$_{1}$ = {ll1} {self.aa}") self.ax2.set_title(fr"{el2} {self.aa} {self.D} {self.l}$_{2}$ = {ll2} {self.aa}") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label="Intensity [DNs]") self.fig.colorbar(im2, ax=self.ax2, orientation="horizontal", label="Intensity [DNs]") class WidebandViewer: """ Wideband image viewer. This visualisation tool is useful for exploring the time series evolution of the wideband images. :param files: The files to explore the time series for. :type files: CRISPWidebandSequence or list :cvar coords: The coordinates selected to produce spectra. :type coords: list[tuple] :cvar px_coords: The coordinates selected to produce spectra in pixel space. This is important for indexing the data later to get the correct spectra. :type px_coords: list[tuple] :cvar shape_type: The spectra can be selected for a single point or for a box with specified dimensions with top-left corner where the user clicks. This attribute tells the user which point is described by which shape. :type shape_type: list[str] """ def __init__(self, files): plt.style.use("bmh") shape = widgets.Dropdown(options=["point", "box"], value="point", description="Shape: ") if type(files) == CRISPWidebandSequence: self.cube = files elif type(files) == list and type(files[0]) == dict: self.cube = CRISPWidebandSequence(files) elif type(files) == list and type(files[0]) == str: files = [{"filename" : f} for f in files] self.cube = CRISPWidebandSequence(files) elif type(files) == list and type(files[0]) == CRISPWidebandSequence: self.cube = files if type(self.cube) is not list: try: self.time = [date2num(f.file.header["DATE-AVG"]) for f in self.cube.list] except KeyError: self.time = [date2num(f.file.header["date_obs"]+" "+f.file.header["time_obs"]) for f in self.cube.list] self.fig = plt.figure(figsize=(8,10)) self.ax1 = self.fig.add_subplot(1, 2, 1, projection=self.cube.list[0].wcs) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax2 = self.fig.add_subplot(1, 2, 2) self.ax2.yaxis.set_label_position("right") self.ax2.yaxis.tick_right() self.ax2.set_ylabel("I [DNs]") self.ax2.set_xlabel("Time [UTC]") self.ax2.xaxis.set_major_locator(plt.MaxNLocator(4)) self.ax2.tick_params(direction="in") t = widgets.IntSlider(value=0, min=0, max=len(self.cube.list)-1, step=1, description="Time index: ", style={"description_width" : "initial"}) widgets.interact(self._img_plot1, t = t) else: try: self.time1 = [date2num(f.file.header["DATE-AVG"]) for f in self.cube[0].list] self.time2 = [date2num(f.file.header["DATE-AVG"]) for f in self.cube[1].list] except KeyError: self.time1 = [date2num(f.file.header["date_obs"]+" "+f.file.header["time_obs"]) for f in self.cube[0].list] self.time2 = [date2num(f.file.header["date_obs"]+" "+f.file.header["time_obs"]) for f in self.cube[1].list] self.fig = plt.figure(figsize=(8,10)) self.ax1 = self.fig.add_subplot(2, 2, 1, projection=self.cube[0].list[0].wcs) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax1.xaxis.set_label_position("top") self.ax1.xaxis.tick_top() self.ax2 = self.fig.add_subplot(2, 2, 3, projection=self.cube[1].list[0].wcs) self.ax2.set_ylabel("Helioprojective Latitude [arcsec]") self.ax2.set_xlabel("Helioprojective Longitude
[arcsec]") self.ax3 = self.fig.add_subplot(2, 2, 2) self.ax3.yaxis.set_label_position("right") self.ax3.yaxis.tick_right() self.ax3.set_ylabel("I [DNs]") self.ax3.set_xlabel("Time [UTC]") self.ax3.xaxis.set_label_position("top") self.ax3.xaxis.tick_top() self.ax3.xaxis.set_major_locator(plt.MaxNLocator(4)) self.ax3.tick_params(direction="in") self.ax4 = self.fig.add_subplot(2, 2, 4) self.ax4.yaxis.set_label_position("right") self.ax4.yaxis.tick_right() self.ax4.set_ylabel("I [DNs]") self.ax4.set_xlabel("Time [UTC]") self.ax4.xaxis.set_major_locator(plt.MaxNLocator(4)) self.ax4.tick_params(direction="in") t1 = widgets.IntSlider(value=0, min=0, max=len(self.cube[0].list)-1, step=1, description="Time index: ", style={"description_width" : "initial"}) t2 = widgets.IntSlider(value=0, min=0, max=len(self.cube[1].list)-1, step=1, description="Time index: ", style={"description_width" : "initial"}) widgets.interact(self._img_plot2, t1=t1, t2=t2) self.coords = [] self.px_coords = [] self.shape_type = [] self.box_coords = [] self.colour_idx = 0 self.n = 0 self.receiver = self.fig.canvas.mpl_connect("button_press_event", self._on_click) widgets.interact(self._shape, opts=shape) try: x = widgets.IntText(value=1, min=1, max=self.cube.list[0].shape[-1], description="x [pix]") y = widgets.IntText(value=1, min=1, max=self.cube.list[0].shape[-2], description="y [pix]") except: x = widgets.IntText(value=1, min=1, max=self.cube[0].list[0].shape[-1], description="x [pix]") y = widgets.IntText(value=1, min=1, max=self.cube[0].list[0].shape[-2], description="y [pix]") outx = widgets.interactive_output(self._boxx, {"x" : x}) outy = widgets.interactive_output(self._boxy, {"y" : y}) display(widgets.HBox([x, y])) done_button = widgets.Button(description="Done") done_button.on_click(self._disconnect_matplotlib) clear_button = widgets.Button(description="Clear") clear_button.on_click(self._clear) save_button = widgets.Button(description="Save") save_button.on_click(self._save) display(widgets.HBox([done_button, clear_button, save_button])) widgets.interact(self._file_name, fn= widgets.Text(description="Filename to save as: ", style={"description_width" : "initial"}, layout=widgets.Layout(width="50%"))) def _on_click(self, event): if self.fig.canvas.manager.toolbar.mode != "": return if type(self.cube) == CRISPWidebandSequence: if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) self.px_coords.append(centre_coord) self.shape_type.append("point") circ = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) px = self.cube.list[0].wcs.array_index_to_world(*centre_coord) << u.arcsec prof = [f.file.data[centre_coord[0], centre_coord[1]] for f in self.cube.list] self.ax2.plot(self.time, prof, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax2.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax2.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.ax2.legend() self.coords.append(px) self.colour_idx += 1 self.fig.canvas.draw() elif self.shape == "box": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1
box_anchor = int(event.ydata), int(event.xdata) self.px_coords.append(box_anchor) self.shape_type.append("box") # obtain the coordinates of the box on a grid with pixels the size of the box to make sure there are no copies of the same box box_coord = box_anchor[0] // self.boxy, box_anchor[1] // self.boxx if box_coord in self.box_coords: coords = [p.get_xy() for p in self.ax1.patches] for p in self.ax1.patches: if p.get_xy() == box_anchor: p.remove() idx = self.box_coords.index(box_coord) del self.box_coords[idx] del self.px_coords[idx] del self.shape_type[idx] del self.coords[idx] return self.coords.append(self.cube.list[0].wcs.array_index_to_world(*box_anchor) << u.arcsec) rect = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) self.ax1.add_patch(rect) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt = self.ax1.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) prof = [np.mean(f.file.data[box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx]) for f in self.cube.list] self.ax2.plot(self.time, prof, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax2.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax2.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.ax2.legend() self.colour_idx += 1 self.fig.canvas.draw() elif type(self.cube) == list: if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) self.px_coords.append(centre_coord) self.shape_type.append("point") circ1 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) circ2 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ1) self.ax2.add_patch(circ2) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt_1 = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_2 = self.ax2.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt_2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) px = self.cube[0].list[0].wcs.array_index_to_world(*centre_coord) << u.arcsec prof_1 = [f.file.data[centre_coord[0], centre_coord[1]] for f in self.cube[0].list] prof_2 = [f.file.data[centre_coord[0], centre_coord[1]] for f in self.cube[1].list] self.ax3.plot(self.time1, prof_1, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax4.plot(self.time2, prof_2, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.legend()
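# format the shared time axis as HH:MM:SS and slant the tick labels so neighbouring time stamps do not overlap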
self.ax3.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax3.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.ax4.legend() self.ax4.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax4.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.coords.append(px) self.colour_idx += 1 self.fig.canvas.draw() elif self.shape == "box": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 box_anchor = int(event.ydata), int(event.xdata) self.px_coords.append(box_anchor) self.shape_type.append("box") # obtain the coordinates of the box on a grid with pixels the size of the box to make sure there are no copies of the same box box_coord = box_anchor[0] // self.boxy, box_anchor[1] // self.boxx if box_coord in self.box_coords: coords = [p.get_xy() for p in self.ax1.patches] for p in self.ax1.patches: if p.get_xy() == box_anchor: p.remove() idx = self.box_coords.index(box_coord) del self.box_coords[idx] del self.px_coords[idx] del self.shape_type[idx] del self.coords[idx] return self.coords.append(self.cube[0].to_lonlat(*box_anchor) << u.arcsec) rect1 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) rect2 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) self.ax1.add_patch(rect1) self.ax2.add_patch(rect2) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt1 = self.ax1.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt2 = self.ax2.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) prof_1 = [np.mean(f.file.data[box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx]) for f in self.cube[0].list] prof_2 = [np.mean(f.file.data[box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx]) for f in self.cube[1].list] self.ax3.plot(self.time1, prof_1, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax4.plot(self.time2, prof_2, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax3.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.ax4.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax4.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.ax3.legend() self.ax4.legend() self.colour_idx += 1 self.fig.canvas.draw() def _shape(self, opts): self.shape = opts def _boxx(self, x): self.boxx = x def _boxy(self, y): self.boxy = y def _disconnect_matplotlib(self, _): self.fig.canvas.mpl_disconnect(self.receiver) def _clear(self, _): self.coords = [] self.px_coords = []
self.shape_type = [] self.box_coords = [] self.colour_idx = 0 self.n = 0 if type(self.cube) == CRISPWidebandSequence: while len(self.ax1.patches) > 0: for p in self.ax1.patches: p.remove() while len(self.ax1.texts) > 0: for t in self.ax1.texts: t.remove() self.ax2.clear() self.ax2.set_ylabel("I [DNs]") self.ax2.set_xlabel("Time [UTC]") self.ax2.xaxis.set_major_locator(plt.MaxNLocator(4)) self.fig.canvas.draw() self.fig.canvas.flush_events() else: while len(self.ax1.patches) > 0: for p in self.ax1.patches: p.remove() while len(self.ax2.patches) > 0: for p in self.ax2.patches: p.remove() while len(self.ax1.texts) > 0: for t in self.ax1.texts: t.remove() while len(self.ax2.texts) > 0: for t in self.ax2.texts: t.remove() self.ax3.clear() self.ax3.set_ylabel("I [DNs]") self.ax3.set_xlabel("Time [UTC]") self.ax3.xaxis.set_major_locator(plt.MaxNLocator(4)) self.ax4.clear() self.ax4.set_ylabel("I [DNs]") self.ax4.set_xlabel("Time [UTC]") self.ax4.xaxis.set_major_locator(plt.MaxNLocator(4)) self.fig.canvas.draw() self.fig.canvas.flush_events() def _save(self, _): self.fig.savefig(self.filename, dpi=300) def _file_name(self, fn): self.filename = fn def _img_plot1(self, t): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar is not None: self.ax1.images[-1].colorbar.remove() im1 = self.ax1.imshow(self.cube.list[t].file.data, cmap="Greys_r") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label="I [DNs]") def _img_plot2(self, t1, t2): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar is not None: self.ax1.images[-1].colorbar.remove() if self.ax2.images == []: pass elif self.ax2.images[-1].colorbar is not None: self.ax2.images[-1].colorbar.remove() im1 = self.ax1.imshow(self.cube[0].list[t1].file.data, cmap="Greys_r") im2 = self.ax2.imshow(self.cube[1].list[t2].file.data, cmap="Greys_r") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label="I [DNs]") self.fig.colorbar(im2, ax=self.ax2, orientation="horizontal", label="I [DNs]") class AtmosViewer: """ This visualisation tool is for the investigation of atmospheric parameters found via inversion techniques. This makes use of the ``Inversion`` class. This assumes that there are three atmospheric parameters in the inversion: electron number density, electron temperature and bulk line-of-sight velocity. These are the quantities estimated by RADYNVERSION. :param filename: The inversion file to be used. :type filename: str or Inversion :param z: The physical height grid of the estimated atmospheric parameters in megametres. Can only be None if filename is already an ``Inversion`` instance. Default is None. (N.B. the RADYNVERSION height grid is available from ``crispy.radynversion.utils``). :type z: numpy.ndarray or None, optional :param wcs: The world coordinate system that the inversion parameters are defined by. Can be None only if filename is already an ``Inversion`` instance. Default is None. :type wcs: astropy.wcs.WCS or None, optional :param header: The additional header information from the observations. Default is None. :type header: dict or None, optional :param eb: Whether or not to plot the errorbars on the parameter profiles. Default is False. :type eb: bool, optional :cvar coords: The coordinates selected to produce spectra. :type coords: list[tuple] :cvar px_coords: The coordinates selected to produce spectra in pixel space. This is important for indexing the data later to get the correct spectra.
:type px_coords: list[tuple] :cvar shape_type: The spectra can be selected for a single point or for a box with specified dimensions with top-left corner where the user clicks. This attribute tells the user which point is described by which shape. :type shape_type: list[str] """ def __init__(self, filename, z=None, wcs=None, header=None, eb=False): plt.style.use("bmh") shape = widgets.Dropdown(options=["point", "box"], value="point", description="Shape: ") if type(filename) == str: assert z is not None assert header is not None self.inv = Inversion(filename=filename, wcs=wcs, z=z, header=header) elif type(filename) == Inversion: self.inv = filename self.coords = [] self.px_coords = [] self.shape_type = [] self.box_coords = [] self.colour_idx = 0 self.n = 0 self.eb = eb self.fig = plt.figure(figsize=(8,10)) self.gs = self.fig.add_gridspec(nrows=5, ncols=3) self.ax1 = self.fig.add_subplot(self.gs[:2, 0], projection=self.inv.wcs.dropaxis(-1)) self.ax2 = self.fig.add_subplot(self.gs[:2, 1], projection=self.inv.wcs.dropaxis(-1)) self.ax3 = self.fig.add_subplot(self.gs[:2, 2], projection=self.inv.wcs.dropaxis(-1)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax2.set_xlabel("Helioprojective Longitude [arcsec]") self.ax3.set_xlabel("Helioprojective Longitude [arcsec]") self.ax2.tick_params(axis="y", labelleft=False) self.ax3.tick_params(axis="y", labelleft=False) self.ax4 = self.fig.add_subplot(self.gs[2, :]) self.ax4.set_ylabel(r"log $n_{e}$ [cm$^{-3}$]") self.ax4.yaxis.set_label_position("right") self.ax4.yaxis.tick_right() self.ax5 = self.fig.add_subplot(self.gs[3, :]) self.ax5.set_ylabel(r"log T [K]") self.ax5.yaxis.set_label_position("right") self.ax5.yaxis.tick_right() self.ax6 = self.fig.add_subplot(self.gs[4, :]) self.ax6.set_ylabel(r"v [km s$^{-1}$]") self.ax6.set_xlabel(r"z [Mm]") self.ax6.yaxis.set_label_position("right") self.ax6.yaxis.tick_right() self.ax4.tick_params(axis="x", labelbottom=False, direction="in") self.ax5.tick_params(axis="x", labelbottom=False, direction="in") self.ax6.tick_params(axis="both", direction="in") widgets.interact(self._img_plot, z = widgets.SelectionSlider(options=np.round(self.inv.z, decimals=3), description="Image height [Mm]: ", style={"description_width" : "initial"}, layout=widgets.Layout(width="50%"))) widgets.interact(self._shape, opts=shape) self.receiver = self.fig.canvas.mpl_connect("button_press_event", self._on_click) x = widgets.IntText(value=1, min=1, max=self.inv.ne.shape[-1], description="x [pix]") y = widgets.IntText(value=1, min=1, max=self.inv.ne.shape[-2], description="y [pix]") outx = widgets.interactive_output(self._boxx, {"x" : x}) outy = widgets.interactive_output(self._boxy, {"y" : y}) display(widgets.HBox([x, y])) done_button = widgets.Button(description="Done") done_button.on_click(self._disconnect_matplotlib) clear_button = widgets.Button(description='Clear') clear_button.on_click(self._clear) save_button = widgets.Button(description="Save") save_button.on_click(self._save) display(widgets.HBox([done_button, clear_button, save_button])) widgets.interact(self._file_name, fn = widgets.Text(description="Filename to save as: ", style={"description_width" : "initial"}), layout=widgets.Layout(width="50%")) def _on_click(self, event): if self.fig.canvas.manager.toolbar.mode != "": return if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) 
self.px_coords.append(centre_coord) circ1 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) circ2 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) circ3 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ1) self.ax2.add_patch(circ2) self.ax3.add_patch(circ3) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt_1 = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_2 = self.ax2.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_3 = self.ax3.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt_2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt_3.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) if self.eb: self.ax4.errorbar(self.inv.z, self.inv.ne[:,centre_coord[0], centre_coord[1]], yerr=self.inv.err[:,centre_coord[0],centre_coord[1],0], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax5.errorbar(self.inv.z, self.inv.temp[:,centre_coord[0], centre_coord[1]], yerr=self.inv.err[:,centre_coord[0],centre_coord[1],1], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax6.errorbar(self.inv.z, self.inv.vel[:,centre_coord[0],centre_coord[1]], yerr=self.inv.err[:,centre_coord[0],centre_coord[1],2], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) else: self.ax4.plot(self.inv.z, self.inv.ne[:,centre_coord[0],centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax5.plot(self.inv.z, self.inv.temp[:,centre_coord[0],centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax6.plot(self.inv.z, self.inv.vel[:,centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax4.legend() self.ax5.legend() self.ax6.legend() px = self.inv.to_lonlat(*centre_coord) << u.arcsec self.colour_idx += 1 self.coords.append(px) self.fig.canvas.draw() elif self.shape == "box": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 box_anchor = int(event.ydata), int(event.xdata) self.px_coords.append(box_anchor) self.shape_type.append("box") # obtain the coordinates of the box on a grid with pixels the size of the box to make sure there is not copies of the same 
box box_coord = box_anchor[0] // self.boxy, box_anchor[1] // self.boxx if box_coord in self.box_coords: coords = [p.get_xy() for p in self.ax1.patches] for p in self.ax1.patches: if p.get_xy() == box_anchor: p.remove() idx = self.box_coords.index(box_coord) del self.box_coords[idx] del self.px_coords[idx] del self.shape_type[idx] del self.coords[idx] return self.coords.append(self.inv.to_lonlat(*box_anchor) << u.arcsec) rect1 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) rect2 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) rect3 = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect3.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) self.ax1.add_patch(rect1) self.ax2.add_patch(rect2) self.ax3.add_patch(rect3) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt1 = self.ax1.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt2 = self.ax2.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt3 = self.ax3.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt3.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) if self.eb: self.ax4.errorbar(self.inv.z, np.mean(self.inv.ne[:,box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx], axis=(1,2)), yerr=np.mean(self.inv.err[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx,0], axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax5.errorbar(self.inv.z, np.mean(self.inv.temp[:,box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx], axis=(1,2)), yerr=np.mean(self.inv.err[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx,1], axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax6.errorbar(self.inv.z, np.mean(self.inv.vel[:,box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx], axis=(1,2)), yerr=np.mean(self.inv.err[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx,2], axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) else: self.ax4.plot(self.inv.z, np.mean(self.inv.ne[:,box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx], axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)],
label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax5.plot(self.inv.z, np.mean(self.inv.temp[:,box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx], axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax6.plot(self.inv.z, np.mean(self.inv.vel[:,box_anchor[0]:box_anchor[0]+self.boxy, box_anchor[1]:box_anchor[1]+self.boxx], axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax4.legend() self.ax5.legend() self.ax6.legend() self.colour_idx += 1 self.fig.canvas.draw() def _shape(self, opts): self.shape = opts def _boxx(self, x): self.boxx = x def _boxy(self, y): self.boxy = y def _disconnect_matplotlib(self, _): self.fig.canvas.mpl_disconnect(self.receiver) def _clear(self, _): self.coords = [] self.px_coords = [] self.shape_type = [] self.box_coords = [] self.colour_idx = 0 self.n = 0 while len(self.ax1.patches) > 0: for p in self.ax1.patches: p.remove() while len(self.ax2.patches) > 0: for p in self.ax2.patches: p.remove() while len(self.ax3.patches) > 0: for p in self.ax3.patches: p.remove() while len(self.ax1.texts) > 0: for t in self.ax1.texts: t.remove() while len(self.ax2.texts) > 0: for t in self.ax2.texts: t.remove() while len(self.ax3.texts) > 0: for t in self.ax3.texts: t.remove() self.ax4.clear() self.ax4.set_ylabel(r"log n$_{e}$ [cm$^{-3}$]") self.ax5.clear() self.ax5.set_ylabel(r"log T [K]") self.ax6.clear() self.ax6.set_ylabel(r"v [km s$^{-1}$]") self.ax6.set_xlabel(r"z [Mm]") self.fig.canvas.draw() self.fig.canvas.flush_events() def _save(self, _): self.fig.savefig(self.filename, dpi=300) def _file_name(self, fn): self.filename = fn def _img_plot(self, z): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar != None: self.ax1.images[-1].colorbar.remove() if self.ax2.images == []: pass elif self.ax2.images[-1].colorbar != None: self.ax2.images[-1].colorbar.remove() if self.ax3.images == []: pass elif self.ax3.images[-1].colorbar != None: self.ax3.images[-1].colorbar.remove() z_idx = int(np.where(np.round(self.inv.z, decimals=3) == np.round(z, decimals=3))[0]) im1 = self.ax1.imshow(self.inv.ne[z_idx], cmap="cividis") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label=r"log $n_{e}$ [cm$^{-3}$]") im2 = self.ax2.imshow(self.inv.temp[z_idx], cmap="hot") self.fig.colorbar(im2, ax=self.ax2, orientation="horizontal", label=r"log T [K]") im3 = self.ax3.imshow(self.inv.vel[z_idx], cmap="RdBu", clim=(-np.max(self.inv.vel[z_idx]), np.max(self.inv.vel[z_idx]))) self.fig.colorbar(im3, ax=self.ax3, orientation="horizontal", label=r"v [km s$^{-1}$]") class ImageViewer: """ This visualiser only views the images for data, not the spectra. For use when interested only in imaging data. Includes sliders to change the wavelength of the observation. :param data: The data to explore, this can be either one or two spectral lines (support for more than two can be added if required). This is the only required argument to view the data. :type data: str or list or CRISP or CRISPSequence or CRISPNonU or CRISPNonUSequence :param wcs: A prescribed world coordinate system. If None, the world coordinate system is derived from the data. Default is None. 
:type wcs: astropy.wcs.WCS or None, optional :param uncertainty: The uncertainty in the intensity values of the data. Default is None. :type uncertainty: numpy.ndarray or None, optional :param mask: A mask to be used on the data. Default is None. :type mask: numpy.ndarray or None, optional :param nonu: Whether or not the spectral axis is non-uniform. Default is False. :type nonu: bool, optional """ def __init__(self, data, wcs=None, uncertainty=None, mask=None, nonu=False): plt.style.use("bmh") self.aa = html.unescape("&#8491;") self.l = html.unescape("&lambda;") self.a = html.unescape("&alpha;") self.D = html.unescape("&Delta;") if not nonu: if type(data) == str: self.cube = CRISP(filename=data, wcs=wcs, uncertainty=uncertainty, mask=mask) if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == list: data = CRISP_sequence_constructor(data, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube = CRISPSequence(files=data) if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom elif type(data) == CRISP: self.cube = data if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == CRISPSequence: self.cube = data if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom else: if type(data) == str: self.cube = CRISPNonU(filename=data, wcs=wcs, uncertainty=uncertainty, mask=mask) if self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == list: data = CRISP_sequence_constructor(data, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube = CRISPNonUSequence(files=data) if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom elif type(data) == CRISPNonU: self.cube = data if 
self.cube.file.data.ndim == 3: self.wvls = self.cube.wave(np.arange(self.cube.shape[0])) << u.Angstrom elif self.cube.file.data.ndim == 4: self.wvls = self.cube.wave(np.arange(self.cube.shape[1])) << u.Angstrom elif type(data) == CRISPNonUSequence: self.cube = data if self.cube.list[0].file.data.ndim == 3: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[0])) << u.Angstrom elif self.cube.list[0].file.data.ndim == 4: self.wvls1 = self.cube.list[0].wave(np.arange(self.cube.list[0].shape[1])) << u.Angstrom if self.cube.list[1].file.data.ndim == 3: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[0])) << u.Angstrom elif self.cube.list[1].file.data.ndim == 4: self.wvls2 = self.cube.list[1].wave(np.arange(self.cube.list[1].shape[1])) << u.Angstrom if type(self.cube) == CRISP or type(self.cube) == CRISPNonU: self.fig = plt.figure(figsize=(8,10)) try: self.ax1 = self.fig.add_subplot(1, 1, 1, projection=self.cube.wcs.dropaxis(-1)) except: self.ax1 = self.fig.add_subplot(1, 1, 1, projection=SlicedLowLevelWCS(self.cube[0].wcs.low_level_wcs, 0)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") ll = widgets.SelectionSlider(options=[np.round(l - np.median(self.wvls), decimals=2).value for l in self.wvls], description = f"{self.D} {self.l} [{self.aa}]") out1 = widgets.interactive_output(self._img_plot1, {"ll" : ll}) display(widgets.HBox([ll])) elif type(self.cube) == CRISPSequence or type(self.cube) == CRISPNonUSequence: self.fig = plt.figure(figsize=(8,10)) try: self.ax1 = self.fig.add_subplot(1, 2, 1, projection=self.cube.list[0].wcs.dropaxis(-1)) except: self.ax1 = self.fig.add_subplot(1, 2, 1, projection=SlicedLowLevelWCS(self.cube.list[0][0].wcs.low_level_wcs, 0)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") try: self.ax2 = self.fig.add_subplot(1, 2, 2, projection=self.cube.list[1].wcs.dropaxis(-1)) except: self.ax2 = self.fig.add_subplot(1, 2, 2, projection=SlicedLowLevelWCS(self.cube.list[1][0].wcs.low_level_wcs, 0)) self.ax2.set_ylabel("Helioprojective Latitude [arcsec]") self.ax2.set_xlabel("Helioprojective Longitude [arcsec]") ll1 = widgets.SelectionSlider( options=[np.round(l - np.median(self.wvls1), decimals=2).value for l in self.wvls1], description=fr"{self.D} {self.l}$_{1}$ [{self.aa}]", style={"description_width" : "initial"} ) ll2 = widgets.SelectionSlider( options=[np.round(l - np.median(self.wvls2), decimals=2).value for l in self.wvls2], description=fr"{self.D} {self.l}$_{2}$ [{self.aa}]", style={"description_width" : "initial"} ) out1 = widgets.interactive_output(self._img_plot2, {"ll1" : ll1, "ll2" : ll2}) display(widgets.HBox([widgets.VBox([ll1, ll2])])) done_button = widgets.Button(description="Done") done_button.on_click(self._disconnect_matplotlib) save_button = widgets.Button(description="Save") save_button.on_click(self._save) display(widgets.HBox([done_button, save_button])) widgets.interact(self._file_name, fn= widgets.Text(description="Filename to save as: ", style={"description_width" : "initial"}, layout=widgets.Layout(width="50%"))) def _disconnect_matplotlib(self, _): self.fig.canvas.mpl_disconnect(self.receiver) def _save(self, _): self.fig.savefig(self.filename, dpi=300) def _file_name(self, fn): self.filename = fn def _img_plot1(self, ll): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar is not None: self.ax1.images[-1].colorbar.remove() ll_idx = 
int(np.where(np.round(self.wvls, decimals=2).value == np.round(np.median(self.wvls).value + ll, decimals=2))[0]) try: data = self.cube.file.data[ll_idx].astype(np.float64) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") except: data = self.cube.file.data[0, ll_idx].astype(np.float64) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") try: el = self.cube.file.header["WDESC1"] except KeyError: el = self.cube.file.header["element"] self.ax1.set_title(fr"{el} {self.aa} {self.D} {self.l}$_{1}$ = {ll} {self.aa}") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label="Intensity [DNs]") def _img_plot2(self, ll1, ll2): if self.ax1.images == []: pass elif self.ax1.images[-1].colorbar is not None: self.ax1.images[-1].colorbar.remove() if self.ax2.images == []: pass elif self.ax2.images[-1].colorbar is not None: self.ax2.images[-1].colorbar.remove() ll1_idx = int(np.where(np.round(self.wvls1, decimals=2).value == np.round(np.median(self.wvls1).value + ll1, decimals=2))[0]) ll2_idx = int(np.where(np.round(self.wvls2, decimals=2).value == np.round(np.median(self.wvls2).value + ll2, decimals=2))[0]) try: data = self.cube.list[0].file.data[ll1_idx].astype(np.float64) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") except: data = self.cube.list[0].file.data[0, ll1_idx].astype(np.float64) data[data < 0] = np.nan im1 = self.ax1.imshow(data, cmap="Greys_r") try: data = self.cube.list[1].file.data[ll2_idx].astype(np.float64) data[data < 0] = np.nan im2 = self.ax2.imshow(data, cmap="Greys_r") except: data = self.cube.list[1].file.data[0, ll2_idx].astype(np.float64) data[data < 0] = np.nan im2 = self.ax2.imshow(data, cmap="Greys_r") try: el1 = self.cube.list[0].file.header["WDESC1"] el2 = self.cube.list[1].file.header["WDESC1"] except KeyError: el1 = self.cube.list[0].file.header["element"] el2 = self.cube.list[1].file.header["element"] self.ax1.set_title(fr"{el1} {self.aa} {self.D} {self.l}$_{1}$ = {ll1} {self.aa}") self.ax2.set_title(fr"{el2} {self.aa} {self.D} {self.l}$_{2}$ = {ll2} {self.aa}") self.fig.colorbar(im1, ax=self.ax1, orientation="horizontal", label="Intensity [DNs]") self.fig.colorbar(im2, ax=self.ax2, orientation="horizontal", label="Intensity [DNs]") class SpectralTimeViewer: """ Imaging spectroscopic viewer. SpectralTimeViewer should be used when one wants to click on points of an image and have the spectrum displayed for that point and the time series for a certain time range of observations. This works **exclusively** in Jupyter notebook but can be a nice data exploration tool. This viewer utilises the data structures defined in `crispy.crisp` and has many variable options. :param data1: The data to explore, this is one spectral line. This is the only required argument to view the data. :type data1: list or CRISPSequence or CRISPNonUSequence :param data2: If there is a second set of data to explore. :type data2: list or CRISPSequence or CRISPNonUSequence :param wcs: A prescribed world coordinate system. If None, the world coordinate system is derived from the data. Default is None. :type wcs: astropy.wcs.WCS or None, optional :param uncertainty: The uncertainty in the intensity values of the data. Default is None. :type uncertainty: numpy.ndarray or None, optional :param mask: A mask to be used on the data. Default is None. :type mask: numpy.ndarray or None, optional :param nonu: Whether or not the spectral axis is non-uniform. Default is False. :type nonu: bool, optional :cvar coords: The coordinates selected to produce spectra.
:type coords: list[tuple] :cvar px_coords: The coordinates selected to produce spectra in pixel space. This is important for indexing the data later to get the correct spectra. :type px_coords: list[tuple] :cvar shape_type: The spectra can be selected for a single point or for a box with specified dimensions with top-left corner where the user clicks. This attribute tells the user which point is described by which shape. :type shape_type: list[str] """ def __init__(self, data1, data2=None, wcs=None, uncertainty=None, mask=None, nonu=False): plt.style.use("bmh") self.aa = html.unescape("&#8491;") self.l = html.unescape("&lambda;") self.a = html.unescape("&alpha;") self.D = html.unescape("&Delta;") shape = widgets.Dropdown(options=["point", "box"], value="point", description="Shape: ") if not nonu: if type(data1) == list: data1 = CRISP_sequence_constructor(data1, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube1 = CRISPSequence(files=data1) if self.cube1.list[0].file.data.ndim == 3: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[0])) << u.Angstrom elif self.cube1.list[0].file.data.ndim == 4: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[1])) << u.Angstrom elif type(data1) == CRISPSequence: self.cube1 = data1 if self.cube1.list[0].file.data.ndim == 3: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[0])) elif self.cube1.list[0].file.data.ndim == 4: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[1])) if data2 == None: pass elif type(data2) == list: data2 = CRISP_sequence_constructor(data2, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube2 = CRISPSequence(files=data2) if self.cube2.list[0].file.data.ndim == 3: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[0])) elif self.cube2.list[0].file.data.ndim == 4: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[1])) elif type(data2) == CRISPSequence: self.cube2 = data2 if self.cube2.list[0].file.data.ndim == 3: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[0])) elif self.cube2.list[0].file.data.ndim == 4: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[1])) else: if type(data1) == list: data1 = CRISP_sequence_constructor(data1, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube1 = CRISPNonUSequence(files=data1) if self.cube1.list[0].file.data.ndim == 3: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[0])) << u.Angstrom elif self.cube1.list[0].file.data.ndim == 4: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[1])) << u.Angstrom elif type(data1) == CRISPNonUSequence: self.cube1 = data1 if self.cube1.list[0].file.data.ndim == 3: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[0])) << u.Angstrom elif self.cube1.list[0].file.data.ndim == 4: self.wvls1 = self.cube1.list[0].wave(np.arange(self.cube1.list[0].shape[1])) << u.Angstrom if data2 == None: pass elif type(data2) == list: data2 = CRISP_sequence_constructor(data2, wcs=wcs, uncertainty=uncertainty, mask=mask, nonu=nonu) self.cube2 = CRISPNonUSequence(files=data2) if self.cube2.list[0].file.data.ndim == 3: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[0])) elif self.cube2.list[0].file.data.ndim == 4: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[1])) elif type(data2) == CRISPNonUSequence: self.cube2 = data2 if
self.cube2.list[0].file.data.ndim == 3: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[0])) elif self.cube2.list[0].file.data.ndim == 4: self.wvls2 = self.cube2.list[0].wave(np.arange(self.cube2.list[0].shape[1])) if data2 == None: self.fig = plt.figure(figsize=(8,10)) self.gs = self.fig.add_gridspec(nrows=2, ncols=2) if self.cube1.list[0].file.data.ndim == 3: self.ax1 = self.fig.add_subplot(self.gs[0,0], projection=self.cube1.list[0].wcs.dropaxis(-1)) elif self.cube1.list[0].file.data.ndim == 4: self.ax1 = self.fig.add_subplot(self.gs[0,0], projection=SlicedLowLevelWCS(self.cube1.list[0][0].wcs.low_level_wcs, 0)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax2 = self.fig.add_subplot(self.gs[0,1]) self.ax2.yaxis.set_label_position("right") self.ax2.yaxis.tick_right() self.ax2.set_ylabel("I [DNs]") self.ax2.set_xlabel(f"{self.l} [{self.aa}]") self.ax2.tick_params(direction="in") self.ax3 = self.fig.add_subplot(self.gs[1,:]) self.ax3.set_ylabel("I [DNs]") self.ax3.set_xlabel("Time [UTC]") self.ll = widgets.SelectionSlider(options=[np.round(l - np.median(self.wvls1), decimals=2).value for l in self.wvls1], description = f"{self.D} {self.l} [{self.aa}]") self.t = widgets.IntSlider(value=0, min=0, max=len(self.cube1.list)-1, step=1, description="Time index: ", disabled=False) try: self.times1 = [date2num(f.file.header["DATE-AVG"]) for f in self.cube1.list] except KeyError: self.times1 = [date2num(f.file.header["date_obs"]+" "+f.file.header["time_obs"]) for f in self.cube1.list] out1 = widgets.interactive_output(self._img_plot1, {"ll" : self.ll, "t" : self.t}) out2 = widgets.interactive_output(self._shape, {"opts" : shape}) display(widgets.HBox([widgets.VBox([self.ll,self.t]), shape])) else: self.fig = plt.figure(figsize=(8,10)) self.gs = self.fig.add_gridspec(nrows=3, ncols=2) try: self.ax1 = self.fig.add_subplot(self.gs[0,0], projection=self.cube1.list[0].wcs.dropaxis(-1)) except: self.ax1 = self.fig.add_subplot(self.gs[0,0], projection=SlicedLowLevelWCS(self.cube1.list[0][0].wcs.low_level_wcs, 0)) self.ax1.set_ylabel("Helioprojective Latitude [arcsec]") self.ax1.set_xlabel("Helioprojective Longitude [arcsec]") self.ax1.xaxis.set_label_position("top") self.ax1.xaxis.tick_top() try: self.ax2 = self.fig.add_subplot(self.gs[1,0], projection=self.cube2.list[0].wcs.dropaxis(-1)) except: self.ax2 = self.fig.add_subplot(self.gs[1,0], projection=SlicedLowLevelWCS(self.cube2.list[0][0].wcs.low_level_wcs, 0)) self.ax2.set_ylabel("Helioprojective Latitude [arcsec]") self.ax2.set_xlabel("Helioprojective Longitude [arcsec]") self.ax3 = self.fig.add_subplot(self.gs[0,1]) self.ax3.yaxis.set_label_position("right") self.ax3.yaxis.tick_right() self.ax3.set_ylabel("Intensity [DNs]") self.ax3.set_xlabel(f"{self.l} [{self.aa}]") self.ax3.xaxis.set_label_position("top") self.ax3.xaxis.tick_top() self.ax3.tick_params(direction="in") self.ax4 = self.fig.add_subplot(self.gs[1,1]) self.ax4.yaxis.set_label_position("right") self.ax4.yaxis.tick_right() self.ax4.set_ylabel("Intensity [DNs]") self.ax4.set_xlabel(f"{self.l} [{self.aa}]") self.ax4.tick_params(direction="in") self.ax5 = self.fig.add_subplot(self.gs[2,:]) self.ax5.set_ylabel("Intensity [DNs]") self.ax5.set_xlabel("Time [UTC]") self.ax5b = self.ax5.twinx() self.ax5b.set_ylabel("Intensity [DNs]") self.ll1 = widgets.SelectionSlider( options=[np.round(l - np.median(self.wvls1), decimals=2).value for l in self.wvls1], description=fr"{self.aa} {self.D} 
{self.l}$_{1}$ [{self.aa}]", style={"description_width" : "initial"} ) self.ll2 = widgets.SelectionSlider( options=[np.round(l - np.median(self.wvls2), decimals=2).value for l in self.wvls2], description=fr"{self.aa} {self.D} {self.l}$_{2}$ [{self.aa}]", style={"description_width" : "initial"} ) self.t1 = widgets.IntSlider(value=0, min=0, max=len(self.cube1.list)-1, step=1, disabled=False, description=r"t$_{1}$ index: ") self.t2 = widgets.IntSlider(value=0, min=0, max=len(self.cube2.list)-1, step=1, disabled=False, description=r"t$_{2}$ index: ") try: self.times1 = [date2num(f.file.header["DATE-AVG"]) for f in self.cube1.list] self.times2 = [date2num(f.file.header["DATE-AVG"]) for f in self.cube2.list] except KeyError: self.times1 = [date2num(f.file.header["date_obs"]+" "+f.file.header["time_obs"]) for f in self.cube1.list] self.times2 = [date2num(f.file.header["date_obs"]+" "+f.file.header["time_obs"]) for f in self.cube2.list] out1 = widgets.interactive_output(self._img_plot2, {"ll1" : self.ll1, "ll2" : self.ll2, "t1" : self.t1, "t2" : self.t2}) out2 = widgets.interactive_output(self._shape, {"opts" : shape}) display(widgets.HBox([widgets.VBox([widgets.HBox([self.ll1, self.ll2]),widgets.HBox([self.t1, self.t2])]), shape])) self.coords = [] self.px_coords = [] self.shape_type = [] self.box_coords = [] self.colour_idx = 0 self.n = 0 self.receiver = self.fig.canvas.mpl_connect("button_press_event", self._on_click) x = widgets.IntText(value=1, min=1, max=self.cube1.list[0].shape[-1], description="x [pix]") y = widgets.IntText(value=1, min=1, max=self.cube1.list[0].shape[-2], description="y [pix]") outx = widgets.interactive_output(self._boxx, {"x" : x}) outy = widgets.interactive_output(self._boxy, {"y" : y}) display(widgets.HBox([x, y])) done_button = widgets.Button(description="Done") done_button.on_click(self._disconnect_matplotlib) clear_button = widgets.Button(description="Clear") clear_button.on_click(self._clear) save_button = widgets.Button(description="Save") save_button.on_click(self._save) display(widgets.HBox([done_button, clear_button, save_button])) widgets.interact(self._file_name, fn= widgets.Text(description="Filename to save as: ", style={"description_width" : "initial"}, layout=widgets.Layout(width="50%"))) def _on_click(self, event): if self.fig.canvas.manager.toolbar.mode != "": return if not hasattr(self, "cube2"): if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) self.px_coords.append(centre_coord) self.shape_type.append("point") circ = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) px = self.cube1.list[self.t.value].to_lonlat(*centre_coord) << u.arcsec if self.cube1.list[0].file.data.ndim == 3: self.ax2.plot(self.wvls1, self.cube1.list[self.t.value].file.data[:, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube1.list[0].file.data.ndim == 4: self.ax2.plot(self.wvls1, 
self.cube1.list[self.t.value].file.data[0, :, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax2.legend() ll_idx = int(np.where(np.round(self.wvls1, decimals=2).value == np.round(np.median(self.wvls1).value + self.ll.value, decimals=2))[0]) if self.cube1.list[0].file.data.ndim == 3: i_time1 = [f.file.data[ll_idx, centre_coord[0], centre_coord[1]] for f in self.cube1.list] self.ax3.plot(self.times1, i_time1, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube1.list[0].file.data.ndim == 4: i_time1 = [f.file.data[0, ll_idx, centre_coord[0], centre_coord[1]] for f in self.cube1.list] self.ax3.plot(self.times1, i_time1, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax3.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.coords.append(px) self.colour_idx += 1 self.fig.canvas.draw() elif self.shape == "box": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 box_anchor = int(event.ydata), int(event.xdata) self.px_coords.append(box_anchor) self.shape_type.append("box") # obtain the coordinates of the box on a grid with pixels the size of the box to make sure there are no copies of the same box box_coord = box_anchor[0] // self.boxy, box_anchor[1] // self.boxx if box_coord in self.box_coords: coords = [p.get_xy() for p in self.ax1.patches] for p in self.ax1.patches: if p.get_xy() == box_anchor: p.remove() idx = self.box_coords.index(box_coord) del self.box_coords[idx] del self.px_coords[idx] del self.shape_type[idx] del self.coords[idx] return self.coords.append(self.cube1.list[0].to_lonlat(*box_anchor) << u.arcsec) rect = patches.Rectangle(box_anchor[::-1], self.boxx, self.boxy, linewidth=2, edgecolor=list(pt_bright_cycler)[self.colour_idx]["color"], facecolor="none") rect.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) self.ax1.add_patch(rect) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt = self.ax1.text(box_anchor[1]-50, box_anchor[0]-10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) if self.cube1.list[0].file.data.ndim == 3: self.ax2.plot(self.wvls1, np.mean(self.cube1.list[self.t.value].file.data[:,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube1.list[0].file.data.ndim == 4: self.ax2.plot(self.wvls1, np.mean(self.cube1.list[self.t.value].file.data[0, :,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx],axis=(1,2)), marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax2.legend() ll_idx =
int(np.where(np.round(self.wvls1, decimals=2).value == np.round(np.median(self.wvls1).value + self.ll.value, decimals=2))[0]) if self.cube1.list[0].file.data.ndim == 3: i_time1 = [np.mean(f.file.data[ll_idx,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx]) for f in self.cube1.list] self.ax3.plot(self.times1, i_time1, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube1.list[0].file.data.ndim == 4: i_time1 = [np.mean(f.file.data[0, ll_idx,box_anchor[0]:box_anchor[0]+self.boxy,box_anchor[1]:box_anchor[1]+self.boxx]) for f in self.cube1.list] self.ax3.plot(self.times1, i_time1, marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.xaxis.set_major_formatter(DateFormatter("%H:%M:%S")) for label in self.ax3.get_xticklabels(): label.set_rotation(40) label.set_horizontalalignment('right') self.colour_idx += 1 self.fig.canvas.draw() else: if self.shape == "point": if self.colour_idx > len(pt_bright_cycler)-1: self.colour_idx = 0 self.n += 1 centre_coord = int(event.ydata), int(event.xdata) #with WCS, the event data is returned in pixels so we don't need to do the conversion from real world but rather to real world later on self.px_coords.append(centre_coord) circ1 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) circ2 = patches.Circle(centre_coord[::-1], radius=10, facecolor=list(pt_bright_cycler)[self.colour_idx]["color"], edgecolor="k", linewidth=1) self.ax1.add_patch(circ1) self.ax2.add_patch(circ2) font = { "size" : 12, "color" : list(pt_bright_cycler)[self.colour_idx]["color"] } txt_1 = self.ax1.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_2 = self.ax2.text(centre_coord[1]+20, centre_coord[0]+10, s=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", fontdict=font) txt_1.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) txt_2.set_path_effects([PathEffects.withStroke(linewidth=3, foreground="k")]) px = self.cube1.list[0].to_lonlat(*centre_coord) << u.arcsec if self.cube1.list[0].file.data.ndim == 3: self.ax3.plot(self.wvls1, self.cube1.list[self.t1.value].file.data[:, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube1.list[0].file.data.ndim == 4: self.ax3.plot(self.wvls1, self.cube1.list[self.t1.value].file.data[0, :, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) if self.cube2.list[0].file.data.ndim == 3: self.ax4.plot(self.wvls2, self.cube2.list[self.t2.value].file.data[:, centre_coord[0], centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) elif self.cube2.list[0].file.data.ndim == 4: self.ax4.plot(self.wvls2, self.cube2.list[self.t2.value].file.data[0, :, centre_coord[0], 
centre_coord[1]], marker=Line2D.filled_markers[self.colour_idx+self.n*len(pt_bright_cycler)], label=f"{self.colour_idx+1+(self.n*len(pt_bright_cycler))}", c=list(pt_bright_cycler)[self.colour_idx]["color"]) self.ax3.legend() self.ax4.legend() ll_idx1 = int(np.where(np.round(self.wvls1, decimals=2).value == np.round(np.median(self.wvls1).value + self.ll1.value, decimals=2))[0]) ll_idx2 = int(np.where(np.round(self.wvls2, decimals=2).value == np.round(
np.median(self.wvls2)
numpy.median
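The viewers above repeatedly recover a wavelength index from a slider value that is stored as an offset from the line-core (median) wavelength, by rounding both sides before comparing. A minimal standalone sketch of that lookup; the wavelength grid and slider offset here are illustrative assumptions, not values from the source:

import numpy as np

# Illustrative uniform wavelength grid [Angstrom] and slider offset [Angstrom].
wvls = np.linspace(6561.0, 6565.0, 11)
offset = 0.8

# Round both the grid and the target to 2 decimals before comparing, as the
# viewers do, so floating-point noise does not break the equality test.
target = np.round(np.median(wvls) + offset, decimals=2)
ll_idx = int(np.where(np.round(wvls, decimals=2) == target)[0][0])
print(ll_idx, wvls[ll_idx])  # index of the selected wavelength point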
import dgl import numpy as np import torch as th from tqdm import tqdm import torch from openhgnn.models import build_model from . import BaseFlow, register_flow from ..utils import EarlyStopping @register_flow("recommendation") class Recommendation(BaseFlow): """Recommendation flows.""" def __init__(self, args=None): super(Recommendation, self).__init__(args) self.target_link = self.task.dataset.target_link self.args.out_node_type = self.task.dataset.out_ntypes self.args.out_dim = self.args.hidden_dim self.model = build_model(self.model_name).build_model_from_args(self.args, self.hg) self.model = self.model.to(self.device) self.reg_weight = 0.1 self.metric = ['recall', 'ndcg'] self.val_metric = 'recall' # self.topk_list = [5, 10, 20, 50, 100] self.topk = 20 #self.evaluator = self.task.get_evaluator(self.metric) self.optimizer = ( th.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay) ) self.patience = args.patience self.max_epoch = args.max_epoch self.num_neg = self.task.dataset.num_neg self.user_name = self.task.dataset.user_name self.item_name = self.task.dataset.item_name self.num_user = self.hg.num_nodes(self.user_name) self.num_item = self.hg.num_nodes(self.item_name) self.train_eid_dict = { etype: self.hg.edges(etype=etype, form='eid') for etype in self.hg.canonical_etypes} def preprocess(self): self.train_hg, self.val_hg, self.test_hg = self.task.get_idx() self.train_neg_hg = self.task.dataset.construct_negative_graph(self.train_hg) self.train_hg = self.train_hg.to(self.device) self.val_hg = self.val_hg.to(self.device) self.test_hg = self.test_hg.to(self.device) self.negative_graph = self.train_neg_hg.to(self.device) self.positive_graph = self.train_hg.edge_type_subgraph([self.target_link]) # generate complete user-item graph for evaluation # src, dst = th.arange(self.num_user), th.arange(self.num_item) # src = src.repeat_interleave(self.num_item) # dst = dst.repeat(self.num_user) # self.eval_graph = dgl.heterograph({('user', 'user-item', 'item'): (src, dst)}, {'user': self.num_user, 'item': self.num_item}).to(self.device) self.preprocess_feature() return def train(self): self.preprocess() epoch_iter = tqdm(range(self.max_epoch), ncols=80) stopper = EarlyStopping(self.args.patience, self._checkpoint) for epoch in epoch_iter: loss = 0 if self.args.mini_batch_flag: loss = self._mini_train_step() else: loss = self._full_train_step() if epoch % self.evaluate_interval == 0: metric_dic = self._test_step(split='val') epoch_iter.set_description( f"Epoch: {epoch:03d}, Recall@K: {metric_dic['recall']:.4f}, NDCG@K: {metric_dic['ndcg']:.4f}, Loss:{loss:.4f}" ) early_stop = stopper.step_score(metric_dic[self.val_metric], self.model) if early_stop: print('Early Stop!\tEpoch:' + str(epoch)) break print(f"Valid {self.val_metric} = {stopper.best_score: .4f}") stopper.load_model(self.model) test_metric_dic = self._test_step(split="test") #val_metric_dic = self._test_step(split="val") print(f"Test Recall@K = {test_metric_dic['recall']: .4f}, NDCG@K = {test_metric_dic['ndcg']: .4f}") # result = dict(Test_metric=test_metric_dic, Val_metric=val_metric_dic) # with open(self.args.results_path, 'w') as f: # json.dump(result, f) # f.write('\n') # self.task.dataset.save_results(result, self.args.results_path) return test_metric_dic['recall'], test_metric_dic['ndcg'], epoch # return dict(Test_metric=test_metric_dic, Val_metric=val_metric_dic) def loss_calculation(self, positive_graph, negative_graph, embedding): p_score = self.ScorePredictor(positive_graph,
embedding).repeat_interleave(self.num_neg) n_score = self.ScorePredictor(negative_graph, embedding) bpr_loss = -torch.log(torch.sigmoid(p_score - n_score)).mean() reg_loss = self.regularization_loss(embedding) return bpr_loss + self.reg_weight * reg_loss def ScorePredictor(self, edge_subgraph, x): with edge_subgraph.local_scope(): for ntype in [self.user_name, self.item_name]: edge_subgraph.nodes[ntype].data['x'] = x[ntype] edge_subgraph.apply_edges( dgl.function.u_dot_v('x', 'x', 'score'), etype=self.target_link) score = edge_subgraph.edges[self.target_link].data['score'] return score.squeeze() def regularization_loss(self, embedding): reg_loss = th.zeros(1, 1, device=self.device) for e in embedding.values(): reg_loss += th.mean(e.pow(2)) return reg_loss def _full_train_step(self): self.model.train() h_dict = self.input_feature() embedding = self.model(self.train_hg, h_dict) loss = self.loss_calculation(self.positive_graph, self.negative_graph, embedding) self.optimizer.zero_grad() loss.backward() self.optimizer.step() # print(loss.item()) return loss.item() def _test_step(self, split=None, logits=None): self.model.eval() if split == 'val': test_graph = self.val_hg elif split == 'test': test_graph = self.test_hg else: raise ValueError('split must be in [val, test]') with th.no_grad(): h_dict = self.input_feature() embedding = self.model(self.hg, h_dict) score_matrix = (embedding[self.user_name] @ embedding[self.item_name].T).detach().cpu().numpy() train_u, train_i = self.positive_graph.edges(etype=self.target_link)[0].cpu().numpy(), self.positive_graph.edges(etype=self.target_link)[1].cpu().numpy() score_matrix[train_u, train_i] = np.NINF ind = np.argpartition(score_matrix, -self.topk) # (num_users, num_items) ind = ind[:, -self.topk:] # (num_users, k), indicating non-ranked rec list arr_ind = score_matrix[np.arange(self.num_user)[:, None], ind] arr_ind_argsort = np.argsort(arr_ind)[np.arange(self.num_user), ::-1] pred_list = ind[np.arange(len(score_matrix))[:, None], arr_ind_argsort] # (num_uses, k) metric_dic = {} for m in self.metric: if m == 'recall': metric_k = recall_at_k(pred_list, test_graph, self.topk, self.user_name, self.target_link) elif m == 'ndcg': metric_k = ndcg_at_k(pred_list, test_graph, self.topk, self.user_name, self.target_link) else: raise NotImplementedError metric_dic[m] = metric_k return metric_dic def recall_at_k(pred_list, test_graph, k, user_name, target_link): sum = 0.0 test_users = 0 for user in range(test_graph.num_nodes(user_name)): test_items_set = set(test_graph.successors(user, etype=target_link).cpu().numpy()) pred_items_set = set(pred_list[user][:k]) if len(test_items_set) != 0: sum += len(test_items_set & pred_items_set) / float(len(test_items_set)) test_users += 1 return sum / test_users def ndcg_at_k(pred_list, test_graph, k, user_name, target_link): ndcg = [] for user in range(test_graph.num_nodes(user_name)): test_items_set = set(test_graph.successors(user, etype=target_link).cpu().numpy()) pred_items_set = pred_list[user][:k] hit_list = [1 if i in pred_items_set else 0 for i in test_items_set] GT = len(test_items_set) if GT >= k: ideal_hit_list = [1] * k else: ideal_hit_list = [1] * GT + [0] * (k - GT) # idcg = compute_DCG(sorted(hit_list, reverse=True)) idcg = compute_DCG(ideal_hit_list) if idcg: ndcg.append(compute_DCG(hit_list) / idcg) return np.mean(ndcg) def compute_DCG(l): l = np.array(l) if l.size: return np.sum(np.subtract(np.power(2, l), 1) / np.log2(
np.arange(2, l.size + 2)
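# Illustrative, self-contained sanity check (not part of the flow above): the
# DCG used by compute_DCG is sum((2^rel - 1) / log2(rank + 1)) over ranked
# positions, and NDCG divides by the DCG of the ideally ordered list. The toy
# hit list below is made up for demonstration.
import numpy as np

def toy_dcg(hits):
    hits = np.asarray(hits, dtype=float)
    return np.sum((np.power(2.0, hits) - 1.0) / np.log2(np.arange(2, hits.size + 2)))

hits = [1, 0, 1, 0, 0]                    # relevance of a ranked top-5 list
ideal_hits = sorted(hits, reverse=True)   # best possible ordering of the same hits
print(toy_dcg(hits) / toy_dcg(ideal_hits))  # NDCG@5 of the toy ranking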
# !/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # -*- coding: utf-8 -*- import mxnet as mx import numpy as np import pickle def load_obj(name): with open(name + '.pkl', 'rb') as f: return pickle.load(f) tag_dict = load_obj("../preprocessed_data/tag_to_index") not_entity_index = tag_dict["O"] def classifer_metrics(label, pred): """ computes f1, precision and recall on the entity class """ prediction = np.argmax(pred, axis=1) label = label.astype(int) pred_is_entity = prediction != not_entity_index label_is_entity = label != not_entity_index corr_pred = (prediction == label) == (pred_is_entity == True) #how many entities are there? num_entities =
np.sum(label_is_entity)
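# Hedged sketch, not the original file's code: given masks like the ones built
# in classifer_metrics above, entity-level precision/recall/F1 can be assembled
# as below. The arrays are toy stand-ins; not_entity_index follows the same
# "O"-tag convention.
import numpy as np

pred_tags = np.array([0, 1, 2, 0, 1])   # toy predicted tag indices
gold_tags = np.array([0, 1, 1, 0, 1])   # toy gold tag indices
o_index = 0                             # index of the non-entity tag "O"

pred_is_entity = pred_tags != o_index
gold_is_entity = gold_tags != o_index

true_pos = np.sum((pred_tags == gold_tags) & gold_is_entity)  # correctly tagged entities
precision = true_pos / max(np.sum(pred_is_entity), 1)
recall = true_pos / max(np.sum(gold_is_entity), 1)
f1 = 2 * precision * recall / max(precision + recall, 1e-12)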
import ffn
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal as aae

try:
    df = pd.read_csv('tests/data/test_data.csv', index_col=0, parse_dates=True)
except FileNotFoundError as e:
    try:
        df = pd.read_csv('data/test_data.csv', index_col=0, parse_dates=True)
    except FileNotFoundError as e2:
        raise e2  # re-raise the exception; raising a plain string is invalid in Python 3

ts = df['AAPL'][0:10]


def test_to_returns_ts():
    data = ts
    actual = data.to_returns()

    assert len(actual) == len(data)
    assert np.isnan(actual[0])
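# to_returns computes simple returns p_t / p_{t-1} - 1 with a leading NaN; a
# minimal cross-check against pandas' own pct_change on a toy series
# (independent of the test data loaded above):
import numpy as np
import pandas as pd

prices = pd.Series([100.0, 110.0, 99.0])
simple_returns = prices / prices.shift(1) - 1
assert np.isnan(simple_returns[0])                          # first return is undefined
assert np.allclose(simple_returns[1:], prices.pct_change()[1:])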
from __future__ import print_function import numpy as np from scipy.io import FortranFile from scipy.interpolate import griddata import os import warnings np.seterr(all='warn') def progenitor_probability(density=None, sfr=None, mass=None, redshift=None): """ Return the progenitor fraction for input values. >>> progenitor_probability(redshift=0.4, mass=10.8) 0.266751184855 """ density = np.nan if density is None else density sfr = np.nan if sfr is None else sfr mass = np.nan if mass is None else mass redshift = np.nan if redshift is None else redshift values = [density, sfr, mass, redshift] if values.count(np.nan) > 3: raise ValueError('Incorrect number of arguments') # Read datacube f = FortranFile(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fractions.dat')) dims = f.read_record(dtype=np.int32) data = f.read_record(dtype=np.float32) data_size = np.product(dims) n_galaxies = np.reshape(data[0:data_size], dims, order='F') n_spiral_progenitors = np.reshape(data[data_size:2*data_size], dims, order='F') bins = np.stack([np.reshape(f.read_record(dtype=np.float32), dims, order='F') for _ in range(dims.size)], axis=0) # Marginalise over dimensions that are not specified while np.nan in values: i = values.index(np.nan) dims = np.delete(dims, i) values.pop(i) weights = n_galaxies n_galaxies = np.sum(n_galaxies, axis=i) n_spiral_progenitors = np.sum(n_spiral_progenitors, axis=i) with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) bins = np.delete(np.nanmean(bins, axis=i+1), i, axis=0) data_size = np.product(dims) n_galaxies = np.reshape(n_galaxies, data_size) n_spiral_progenitors = np.reshape(n_spiral_progenitors, data_size) # Only use bins where there are at least 4 galaxies pick = n_galaxies > 3 # Calculate progenitor fractions with np.errstate(divide='ignore', invalid='ignore'): frac =
np.true_divide(n_spiral_progenitors[pick], n_galaxies[pick])
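# Minimal illustration (made-up counts) of the marginalisation performed above:
# unspecified dimensions are summed out of the count cubes before the
# progenitor fraction is formed, with 0/0 cells silenced by errstate.
import numpy as np

n_gal = np.array([[4.0, 0.0], [10.0, 6.0]])
n_pro = np.array([[1.0, 0.0], [5.0, 3.0]])

n_gal_marg = np.sum(n_gal, axis=1)   # marginalise over the second axis
n_pro_marg = np.sum(n_pro, axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
    frac = np.true_divide(n_pro_marg, n_gal_marg)  # element-wise progenitor fraction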
# -*-coding:Utf-8 -* # ==================================================================== # Packages # ==================================================================== import configparser as cp import copy import glob import muLAn import muLAn.packages.general_tools as gtools import muLAn.packages.algebra as algebra import numpy as np import os import pandas as pd import sys import tables class FitResults: """Class to read, save, and manipulate models tested during the fit. Args: parser (:obj:`configparser.ConfigParser`): options and configurations for muLAn. run_id (str): Name of a muLAn archive (i.e., the name of a run). Default `None`. format (str): {`ascii` | `h5`}, default `ascii`. File format to load the MCMC results. Attributes: samples (`pandas.DataFrame`): table of all the samples explored by the MCMC. """ def __init__(self, parser, format='ascii', **kwargs): self.parser = parser # Load if format=='ascii': self.load_aimc_from_file(parser, **kwargs) elif format=='h5': self.load() def load_aimc_from_file(self, cfgsetup, **kwargs): """Method to load model parameters from files created during MCMC. This method loads ASCII files created by the package EMCEE, after the end of an MCMC run. The sampler is assumed to be and AIMC. Args: parser (:obj:`configparser.ConfigParser`): options and configurations for muLAn. run_id (str): Name of a muLAn archive (i.e., the name of a run). Default `None`. """ # Identify filenames from MCMC path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains') if 'run_id' in kwargs: fnames_chains = glob.glob(path + kwargs['run_id'] + "*-c*.txt") fnames_chains_exclude = glob.glob(path + kwargs['run_id'] + "*g*.txt") else: fnames_chains = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*-c*.txt") fnames_chains_exclude = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*g*.txt") temp =[] for a in fnames_chains: if (a in fnames_chains_exclude)==False: temp.append(a) fnames_chains = copy.deepcopy(temp) del temp, fnames_chains_exclude nb_chains = len(fnames_chains) if nb_chains!=0: samples_file = dict( {'chi2': [], 't0': [], 'u0': [], 'tE': [], 'rho': [], \ 'gamma': [], 'piEE': [], 'piEN': [], 's': [], 'q': [], \ 'alpha': [], 'dalpha': [], 'ds': [], 'chain': [], 'fullid': [],\ 'date_save': [], 'time_save': [], 'id': [], 'accrate': [],\ 'chi2/dof': []}) # Read on the chains if nb_chains > 0: for i in range(nb_chains): file = open(fnames_chains[i], 'r') for line in file: params_model = line if params_model[0] == '#': continue try: samples_file['id'].append(int( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][0])) samples_file['t0'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][1])) samples_file['u0'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][2])) samples_file['tE'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][3])) samples_file['rho'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][4])) samples_file['gamma'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][5])) samples_file['piEN'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][6])) samples_file['piEE'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][7])) samples_file['s'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][8])) samples_file['q'].append(float( [a for a in 
(params_model.split('\n')[0].split(' ')) if (a != '')][9])) samples_file['alpha'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][10])) samples_file['dalpha'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][11])) samples_file['ds'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][12])) samples_file['chi2'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][13])) samples_file['accrate'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][14])) samples_file['date_save'].append(int( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][15])) samples_file['time_save'].append( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][16]) samples_file['chi2/dof'].append(float( [a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][17])) samples_file['chain'].append(int(fnames_chains[i][-8:-4])) samples_file['fullid'].append(-1) except: text = "\n\033[1m\033[91mThe file\033[0m\n" + "\033[1m\033[91m" + fnames_chains[i]\ + "\033[0m\n\033[1m\033[91mis corrupted. muLAn killed.\033[0m" sys.exit(text) file.close() # Create a pandas.DataFrame to store the runs samples = pd.DataFrame(samples_file) samples['dchi2'] = samples['chi2'] - np.min(samples['chi2']) samples = samples.sort_values(['dchi2', 'fullid'], ascending=[1, 0]) samples['fs_ref'] = 0.0 samples['fb_ref'] = 0.0 # Add physical quantities if not 'tS' in samples: samples['tS'] = samples['tE'] * samples['rho'] if not 'tb' in samples: samples['tb'] = cfgsetup.getfloat('Modelling', 'tb') # Give a unique ID to models id_start = np.max(samples['fullid']) + 1 if id_start == 0 : id_start = 1 mask = samples['fullid'] == -1 samples.loc[mask, 'fullid'] = id_start + np.arange(mask.sum()) self.samples = samples def save(self, filename=None, format='h5', N=None): """Save MCMC samples in the specified format. Args: filename (str): file name for the output file. 
format (str, default 'h5'): {'ascii' | 'h5'} """ if format == 'h5': if filename==None: fname = "{:s}-Fits.h5".format(self.parser.get('Controls', 'Archive')) else: fname = filename self.samples.to_hdf(fname, 'fits', mode='w') elif format == 'ascii': if filename==None: fname = "{:s}-Fits.csv".format(self.parser.get('Controls', 'Archive')) else: fname = filename if N == None: N = len(self.samples) # Save new file in csv with exponential file = open(fname, 'w') format = '#{:},'.format('UniqueID')\ + '{:},'.format('dchi2')\ + '{:},'.format('t0')\ + '{:},'.format('u0')\ + '{:},'.format('tE')\ + '{:},'.format('rho')\ + '{:},'.format('gamma')\ + '{:},'.format('piEN')\ + '{:},'.format('piEE')\ + '{:},'.format('s')\ + '{:},'.format('q')\ + '{:},'.format('alpha')\ + '{:},'.format('dalpha')\ + '{:},'.format('ds')\ + '{:},'.format('chi2')\ + '{:},'.format('chi2/dof')\ + '{:},'.format('accrate')\ + '{:}'.format('chain')\ + '\n' file.write(format) for i in range(N): format = '{:},'.format(self.samples['fullid'].values[i])\ + '{:.3f},'.format(self.samples['dchi2'].values[i])\ + '{:.10e},'.format(self.samples['t0'].values[i])\ + '{:.10e},'.format(self.samples['u0'].values[i])\ + '{:.10e},'.format(self.samples['tE'].values[i])\ + '{:.10e},'.format(self.samples['rho'].values[i])\ + '{:.10e},'.format(self.samples['gamma'].values[i])\ + '{:.10e},'.format(self.samples['piEN'].values[i])\ + '{:.10e},'.format(self.samples['piEE'].values[i])\ + '{:.10e},'.format(self.samples['s'].values[i])\ + '{:.10e},'.format(self.samples['q'].values[i])\ + '{:.10e},'.format(self.samples['alpha'].values[i])\ + '{:.10e},'.format(self.samples['dalpha'].values[i])\ + '{:.10e},'.format(self.samples['ds'].values[i])\ + '{:.10e},'.format(self.samples['chi2'].values[i])\ + '{:.10e},'.format(self.samples['chi2/dof'].values[i])\ + '{:.3f},'.format(self.samples['accrate'].values[i])\ + '{:}'.format(self.samples['chain'].values[i])\ + '\n' file.write(format) file.close() def load(self, filename=None, format='h5'): """Save MCMC samples in the specified format. Args: fname (str): file name for the output file. format (str): currently, the only option is 'hdf5'. """ if format == 'h5': if filename==None: fname = "{:s}-Fits.h5".format(self.parser.get('Controls', 'Archive')) else: fname = filename self.samples = pd.read_hdf(fname, 'fits') def remove_duplicates(self, inplace=False, **kwargs): """Create a table of MCMC samples without duplicates. Args: inplace (bool): default False. Replace self.samples if True, return the resulting table otherwise. """ col = ['chi2', 't0', 'u0', 'tE', 'rho', 'gamma', 'piEE', 'piEN', 's', 'q', 'alpha', 'dalpha', 'ds'] if inplace: self.samples = self.samples.loc[ self.samples[col].round(12).drop_duplicates(subset=col).index] else: samples = self.samples.loc[ self.samples[col].round(12).drop_duplicates(subset=col).index] return samples class LensModel: """Class to compute a microlensing model with requested algorithms. Args: model (:obj:`muLAn.iotools.FitResults`): model parameters. data (:obj:`muLAn.data.Data`, default None): table of observations. epochs (, default None): list of epochs to compute a model. Attributes: fit (:obj:`muLAn.iotools.FitResults`): model parameters. 
""" def __init__(self, archive='archive.h5', **kwargs): self.archive = archive.replace('//','/') def compute(self, data=None, models=None, lib=None, parser=None, magnification=False, save=True): """Method computing the magnification Args: lib models data parser magnification """ # - magnification of data # - flux of data # - aligned data # - residual of data (in flux and magnitude and in sigmas for both) # - source trajectory # - magnification at some points to plot the model table = data.table.copy(deep=True) instrument = np.unique(table['obs']) algo = np.unique(table['model']) obs_ref = parser.items('Observatories')[0][0] fs_ref = 0.0 fb_ref = 0.0 midx = models.index for k in range(len(midx)): params = models.loc[midx[k]].to_dict() tb = models.loc[midx[k], 'tb'] for j in range(len(instrument)): mask1 = table['obs'] == instrument[j] for i in range(algo.shape[0]): mask = (table['obs'] == instrument[j])\ & (table['model'] == algo[i]) if mask.sum() > 0: epochs = table.loc[mask, 'dates'].values DsN = table.loc[mask, 'DsN'].values DsE = table.loc[mask, 'DsE'].values Ds = dict({'N': DsN, 'E': DsE}) try: kwargs_method = dict(parser.items(algo[i])) except: kwargs_method = dict() mag = lib[algo[i]].magnifcalc(epochs, params, Ds=Ds, tb=tb, **kwargs_method) table.loc[mask,'amp'] = mag fs, fb = algebra.fsfbwsig(table[mask1], None, blending=True) table.loc[mask1,'fs'] = fs table.loc[mask1,'fb'] = fb if instrument[j] == obs_ref: fs_ref = fs fb_ref = fb table['flux_model'] = table['fs'] * table['amp'] + table['fb'] table['amp_data'] = (table['flux'] - table['fb']) / table['fs'] table['normalized_flux'] = table['amp_data'] * fs_ref + fb_ref table['normalized_flux_err'] = table['err_flux'] * fs_ref / table['flux'] if save: try: key = self.archive.split('/')[-1].split('.h5') key = ''.join(key[:-1]) key = '{:s}_{:d}_data'.format(key, params['fullid']) table.to_hdf(self.archive, key=key, mode='a') except tables.exceptions.HDF5ExtError as e: txt = '\n\nSomething is wrong with the file {:s}.'.format(self.archive) txt = '{:s}\nPlease check if the file is not used by another software.'.format(txt) print(e, txt) sys.exit() def magnification_model(self, epochs=None, models=None, lib=None, save=True): """Method computing the magnification Args: lib models data parser magnification """ algo =
np.unique(epochs['model'])
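# Toy version (synthetic values) of the rounding trick used by
# remove_duplicates above: rounding to 12 decimals before drop_duplicates
# collapses samples that differ only by floating-point noise.
import pandas as pd

toy_samples = pd.DataFrame({'t0': [1.0000000000000100, 1.0, 2.0],
                            'u0': [0.1, 0.1, 0.2]})
unique_samples = toy_samples.loc[toy_samples.round(12).drop_duplicates().index]
print(len(unique_samples))  # 2: the first two rows collapse into one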
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
This module contains some math utils that are used in the chemenv package.
"""

__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "<NAME>"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Feb 20, 2016"

from math import sqrt

import numpy as np
from scipy.special import erf
from functools import reduce

##############################################################
### cartesian product of lists ###############################
##############################################################


def _append_es2sequences(sequences, es):
    result = []
    if not sequences:
        for e in es:
            result.append([e])
    else:
        for e in es:
            result += [seq + [e] for seq in sequences]
    return result


def _cartesian_product(lists):
    """
    given a list of lists, returns all the possible combinations
    taking one element from each list
    The list does not have to be of equal length
    """
    return reduce(_append_es2sequences, lists, [])


def prime_factors(n):
    """Lists prime factors of a given natural integer, from greatest to smallest
    :param n: Natural integer
    :rtype : list of all prime factors of the given natural n
    """
    i = 2
    while i <= sqrt(n):
        if n % i == 0:
            l = prime_factors(n // i)  # integer division: n / i would yield a float in Python 3
            l.append(i)
            return l
        i += 1
    return [n]  # n is prime


def _factor_generator(n):
    """
    From a given natural integer, returns the prime factors and their multiplicity
    :param n: Natural integer
    :return:
    """
    p = prime_factors(n)
    factors = {}
    for p1 in p:
        try:
            factors[p1] += 1
        except KeyError:
            factors[p1] = 1
    return factors


def divisors(n):
    """
    From a given natural integer, returns the list of divisors in ascending order
    :param n: Natural integer
    :return: List of divisors of n in ascending order
    """
    factors = _factor_generator(n)
    _divisors = []
    listexponents = [[k**x for x in range(0, factors[k] + 1)] for k in list(factors.keys())]
    listfactors = _cartesian_product(listexponents)
    for f in listfactors:
        _divisors.append(reduce(lambda x, y: x * y, f, 1))
    _divisors.sort()
    return _divisors


def get_center_of_arc(p1, p2, radius):
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    dd = np.sqrt(dx * dx + dy * dy)
    radical = np.power((radius / dd), 2) - 0.25
    if radical < 0:
        raise ValueError("Impossible to find center of arc because the arc is ill-defined")
    tt = np.sqrt(radical)
    if radius > 0:
        tt = -tt
    return (p1[0] + p2[0]) / 2 - tt * dy, (p1[1] + p2[1]) / 2 + tt * dx


def get_linearly_independent_vectors(vectors_list):
    independent_vectors_list = []
    for vector in vectors_list:
        if np.any(vector != 0):
            if len(independent_vectors_list) == 0:
                independent_vectors_list.append(np.array(vector))
            elif len(independent_vectors_list) == 1:
                rank = np.linalg.matrix_rank(np.array([independent_vectors_list[0], vector, [0, 0, 0]]))
                if rank == 2:
                    independent_vectors_list.append(np.array(vector))
            elif len(independent_vectors_list) == 2:
                mm = np.array([independent_vectors_list[0], independent_vectors_list[1], vector])
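# The rank test above in one toy check (not pymatgen code): a candidate vector
# is linearly independent of the collected ones exactly when stacking it
# increases the matrix rank.
import numpy as np

basis = [np.array([1, 0, 0]), np.array([0, 1, 0])]
candidate = np.array([1, 1, 0])        # lies in span(basis)
stacked = np.array(basis + [candidate])
print(np.linalg.matrix_rank(stacked))  # 2, so candidate is linearly dependent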
# -*- coding: utf-8 -*- """ Module for mathematical analysis of voltage traces from electrophysiology. AUTHOR: <NAME> """ import scipy.stats import numpy as np import math import logging import sys from scipy import interpolate import operator import pprint pp = pprint.PrettyPrinter(indent=4) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def print_comment_v(text, warning=False): print_comment(text, True, warning) def print_comment(text, print_it=False, warning=False): prefix = "pyelectro >>> " if warning: prefix += "WARNING " if not isinstance(text, str): text = text.decode("ascii") if print_it: print("%s%s" % (prefix, text.replace("\n", "\n" + prefix))) def voltage_plot(t, v, title=None): """ Plot electrophysiology recording. """ from matplotlib import pyplot as plt plt.xlabel("Time (ms)") plt.ylabel("Voltage (mV)") plt.title(title) plt.grid() plt.plot(t, v) plt.show() def smooth(x, window_len=11, window="hanning"): """Smooth the data using a window with requested size. This function is useful for smoothing out experimental data. This method utilises the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. :param x: the input signal :param window_len: the dimension of the smoothing window; should be an odd integer :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', flat window will produce a moving average smoothing. :return: smoothed signal example: .. code-block:: python t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) .. seealso:: numpy.hanning numpy.hamming numpy.bartlett numpy.blackman numpy.convolve scipy.signal.lfilter """ if x.ndim != 1: raise (ValueError, "smooth only accepts 1 dimension arrays.") if x.size < window_len: raise (ValueError, "Input vector needs to be bigger than window size.") if window_len < 3: return x if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ( ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'", ) s = np.r_[x[(window_len - 1):0:-1], x, x[-1:-window_len:-1]] if window == "flat": # moving average w = np.ones(window_len, "d") else: w = eval("np." + window + "(window_len)") y = np.convolve(w / w.sum(), s, mode="valid") edge = int(window_len / 2) return y[edge:-edge] def linear_fit(t, y): """Fits data to a line :param t: time vector :param y: variable which varies with time (such as voltage) :returns: Gradient M for a formula of the type y=C+M*x """ vals = np.array(y) m, C = np.polyfit(t, vals, 1) return m def three_spike_adaptation(t, y): """Linear fit of amplitude vs time of first three AP spikes Initial action potential amplitudes may very substaintially in amplitude and then settle down. :param t: time vector (AP times) :param y: corresponding AP amplitude :returns: Gradient M for a formula of the type y=C+M*x for first three action potentials """ t = np.array(t) y = np.array(y) t = t[0:3] y = y[0:3] m = linear_fit(t, y) return m def exp_fit(t, y): """ Fits data to an exponential. 
Returns K for a formula of the type y=A*exp(K*x) :param t: time vector :param y: variable which varies with time (such as voltage) """ vals = np.array(y) C = np.min(vals) vals = vals - C + 1e-9 # make sure the data is all positive vals = np.log(vals) K, A_log = np.polyfit(t, vals, 1) return K def window_peak_detector(v, delta=0.01): """ Detects peak by comparing mean of either side of peak and deciding whether it exceeds some threshold. :return: Boolean, True if a peak is detected in that window """ if len(v) % 2 == 0: raise Exception("Window length must be odd") middle_index = len(v) // 2 middle_value = v[middle_index] left_mean = np.mean(v[0:middle_index]) right_mean = np.mean(v[middle_index + 1 :]) left_elevation = middle_value - left_mean right_elevation = middle_value - right_mean left_exceeds_threhold = left_elevation > delta right_exceeds_threshold = right_elevation > delta return left_exceeds_threhold and right_exceeds_threshold def centered_slice(v, index, length=5): """ Retruns slice of given length centred on index. """ if length % 2 == 0: raise Exception("Window length must be odd") if len(v) < index + length // 2: raise Exception("Index too close to edge or window too big") start_index = index - length // 2 slice = v[start_index : start_index + length] return slice def max_min_simple(a, times, delta=0, peak_threshold=0.0, verbose=False): print_comment( "Calculating max_min_simple of a: (%s,...,%s)#%i, t: (%s,...,%s)#%i; thresh %s, delta %s" % (a[0], a[-1], len(a), times[0], times[-1], len(times), peak_threshold, delta), verbose, ) maxima_locations = [] maxima_number = 0 maxima_times = [] maxima_values = [] minima_locations = [] minima_number = 0 minima_times = [] minima_values = [] spiking = False has_spiked = False last_max_loc = -1 last_max_t = -1 last_max_v = -1 * sys.float_info.max last_min_loc = -1 last_min_t = -1 last_min_v = sys.float_info.max for i in range(len(a)): t = times[i] v = a[i] if not spiking and v >= peak_threshold: print_comment("Spike of %s at %s" % (v, t), verbose) spiking = True has_spiked = True if last_min_loc > 0: minima_locations.append(last_min_loc) minima_times.append(last_min_t) minima_values.append(last_min_v) minima_number += 1 last_min_loc = -1 last_min_t = -1 last_min_v = sys.float_info.max elif spiking and v < peak_threshold: spiking = False if last_max_loc > 0: maxima_locations.append(last_max_loc) maxima_times.append(last_max_t) maxima_values.append(last_max_v) maxima_number += 1 last_max_loc = -1 last_max_t = -1 last_max_v = -1 * sys.float_info.max if spiking: if v >= last_max_v: last_max_loc = i last_max_t = t last_max_v = v elif has_spiked: if v <= last_min_v: last_min_loc = i last_min_t = t last_min_v = v # need to construct the dictionary here: turning_points = { "maxima_locations": maxima_locations, "minima_locations": minima_locations, "maxima_number": maxima_number, "minima_number": minima_number, "maxima_times": maxima_times, "minima_times": minima_times, "maxima_values": maxima_values, "minima_values": minima_values, } return turning_points def max_min(a, t, delta=0, peak_threshold=0.0, verbose=False): """ Find the maxima and minima of a voltage trace. 
:note This method does not appear to be very robust when comparing to experimental data :param a: time-dependent variable (usually voltage) :param t: time-vector :param delta: the value by which a peak or trough has to exceed its neighbours to be considered outside of the noise :param peak_threshold: peaks below this value are discarded :return: turning_points, dictionary containing number of max, min and their locations .. note:: minimum value between two peaks is in some ways a better way of obtaining a minimum since it guarantees an answer, this may be something which should be implemented. """ if peak_threshold is None: import sys peak_threshold = -1 * sys.float_info.max print_comment( "Calculating max_min of a: (%s,...,%s)#%i, t: (%s,...,%s)#%i; thresh %s, delta %s" % (a[0], a[-1], len(a), t[0], t[-1], len(t), peak_threshold, delta), verbose, ) gradients = np.diff(a) maxima_info = [] minima_info = [] count = 0 for i in gradients[:-1]: count += 1 if i > 0 and gradients[count] < 0 and i != gradients[count]: # found a maximum maximum_value = a[count] maximum_location = count maximum_time = t[count] preceding_point_value = a[maximum_location - 1] succeeding_point_value = a[maximum_location + 1] # filter: maximum_valid = False # logically consistent but not very pythonic.. if ((maximum_value - preceding_point_value) > delta) * ( (maximum_value - succeeding_point_value) > delta ): maximum_valid = True if maximum_value < peak_threshold: maximum_valid = False if maximum_valid: maxima_info.append((maximum_value, maximum_location, maximum_time)) maxima_num = len(maxima_info) if maxima_num > 0: minima_num = maxima_num - 1 else: minima_num = 0 values_getter = operator.itemgetter(0) location_getter = operator.itemgetter(1) time_getter = operator.itemgetter(2) maxima_locations = list(map(location_getter, maxima_info)) maxima_times = list(map(time_getter, maxima_info)) maxima_values = list(map(values_getter, maxima_info)) for i in range(maxima_num - 1): maximum_0_location = maxima_locations[i] maximum_1_location = maxima_locations[i + 1] interspike_slice = a[maximum_0_location:maximum_1_location] minimum_value = min(interspike_slice) minimum_location = ( list(interspike_slice).index(minimum_value) + maximum_0_location ) minimum_time = t[minimum_location] minima_info.append((minimum_value, minimum_location, minimum_time)) minima_locations = list(map(location_getter, minima_info)) minima_times = list(map(time_getter, minima_info)) minima_values = list(map(values_getter, minima_info)) # need to construct the dictionary here: turning_points = { "maxima_locations": maxima_locations, "minima_locations": minima_locations, "maxima_number": maxima_num, "minima_number": minima_num, "maxima_times": maxima_times, "minima_times": minima_times, "maxima_values": maxima_values, "minima_values": minima_values, } return turning_points ''' PG removing this... def max_min2(v,t,delta=0.1,peak_threshold=0.0,window_length=11): """ Uses the max_min function but then does a second pass with window peak detector to discard peaks. This is being prepared as an enhancement to the old peak detector. 
""" max_min_dict = max_min(v,t,delta=0.0,peak_threshold=peak_threshold) maxima_locations = max_min_dict['maxima_locations'] peak_mask = [] for location in maxima_locations: slice = centered_slice(v,location,window_length) peak_flag = window_peak_detector(slice, delta=delta) peak_mask.append(peak_flag) #this anonymous function strips a list of all corresponding #non-zero elements in the mask: print("peak_mask: "+peak_mask) mask_filter = lambda l, mask : list(itertools.compress(l,mask)) max_min_dict.pop('maxima_number',None) max_min_dict.pop('minima_number',None) dict_keys = max_min_dict.keys() for key in dict_keys: max_min_dict[key] = mask_filter(max_min_dict[key],peak_mask) max_min_dict['maxima_number'] = len(max_min_dict['maxima_locations']) max_min_dict['minima_number'] = max_min_dict['maxima_number'] - 1 return max_min_dict''' def spike_frequencies(t): """ Calculate frequencies associated with interspike times :param t: a list of spike times in ms :return: list of frequencies in Hz associated with interspike times and times associated with the frequency (time of first spike in pair) """ spike_times = np.array(t) interspike_times = np.diff(spike_times) interspike_frequencies = 1000 / interspike_times return [t[:-1], interspike_frequencies] def max_min_interspike_time(t): """ Calculate the maximum & minimum interspike interval from the list of maxima times :param t: a list of spike times in ms :return: (max, min) interspike time """ spike_times = np.array(t) interspike_times = np.diff(spike_times) return max(interspike_times), min(interspike_times) def mean_spike_frequency(t): """ Find the average frequency of spikes :param t: a list of spike times in ms :return: mean spike frequency in Hz, calculated from mean interspike time """ interspike_times = np.diff(t) mean_interspike_time = np.mean(interspike_times) mean_frequency = 1000.0 / ( mean_interspike_time ) # factor of 1000 to give frequency in Hz if math.isnan(mean_frequency): mean_frequency = 0 return mean_frequency def y_from_x(y, x, y_to_find): """ Returns list of x values corresponding to a y after a doing a univariate spline interpolation :param x: x-axis numerical data :param y: corresponding y-axis numerical data :param y_to_find: x value for desired y-value, interpolated from nearest two measured x/y value pairs :return: interpolated y value """ # TODO:should have the ability to return indices, this should be a flag yreduced = np.array(y) - y_to_find freduced = interpolate.UnivariateSpline(x, yreduced, s=None) return freduced.roots() def single_spike_width(y, t, baseline): """Find the width of a spike at a fixed height calculates the width of the spike at height baseline. If the spike shape does not intersect the height at both sides of the peak the method will return value 0. If the peak is below the baseline 0 will also be returned. The input must be a single spike or nonsense may be returned. Multiple-spike data can be handled by the interspike_widths method. :param y: voltage trace (array) corresponding to the spike :param t: time value array corresponding to y :param baseline: the height (voltage) where the width is to be measured. 
:return: width of spike at height defined by baseline """ logger.debug("Baseline: %f" % baseline) try: y = np.array(y) t = np.array(t) value = np.max(y) location = np.argmax(y) logger.debug("Max voltage: %f" % value) logger.debug("Index of max: %f" % location) # moving left: while value > baseline: location -= 1 value = y[location] undershoot_value = y[location + 1] overshoot_time = t[location] undershoot_time = t[location + 1] interpolated_left_time = np.interp( baseline, [value, undershoot_value], [overshoot_time, undershoot_time] ) if location < 0: raise ValueError("Baseline does not intersect spike") # now go right value = np.max(y) location = np.argmax(y) while value > baseline: location += 1 value = y[location] undershoot_value = y[location - 1] overshoot_time = t[location] undershoot_time = t[location - 1] interpolated_right_time = np.interp( baseline, [value, undershoot_value], [overshoot_time, undershoot_time] ) if location > len(y) - 1: raise ValueError("Baseline does not intersect spike") width = interpolated_right_time - interpolated_left_time except: logger.warning("Single spike width algorithm failure - setting to 0") width = 0.0 return width def spike_widths(y, t, max_min_dictionary, baseline=0, delta=0): """ Find the widths of each spike at a fixed height in a train of spikes. Returns the width of the spike of each spike in a spike train at height baseline. If the spike shapes do not intersect the height at both sides of the peak the method will return value 0 for that spike. If the peak is below the baseline 0 will also be returned for that spike. :param y: voltage trace (array) corresponding to the spike train :param t: time value array corresponding to y :param max_min_dictionary: precalculated max_min_dictionary :param baseline: the height (voltage) where the width is to be measured. :return: width of spike at height defined by baseline """ max_num = max_min_dictionary["maxima_number"] maxima_times = max_min_dictionary["maxima_times"] minima_locations = max_min_dictionary["minima_locations"] spike_widths = [] for i in range(max_num): # need to splice down the y: if i == 0: left_min_location = 0 right_min_location = minima_locations[i] + 1 elif i == max_num - 1: left_min_location = minima_locations[i - 1] right_min_location = len(y) else: left_min_location = minima_locations[i - 1] right_min_location = minima_locations[i] + 1 spike_shape = y[left_min_location:right_min_location] spike_t = t[left_min_location:right_min_location] try: width = single_spike_width(spike_shape, spike_t, baseline) logger.debug("Spike width: %f" % width) except: logger.warning("Spike width set to 0, this indicates a problem") width = 0 spike_widths.append(width) maxima_times_widths = [maxima_times, spike_widths] return maxima_times_widths def burst_analyser(t): """Pearson's correlation coefficient applied to interspike times :param t: Rank-1 array containing spike times :return: pearson's correlation coefficient of interspike times """ x = np.arange(len(t)) pearsonr = scipy.stats.pearsonr(x, t)[0] return pearsonr def spike_covar(t): """Calculates the coefficient of variation of interspike times :param t: Rank-1 array containing spike times :return: coefficient of variation of interspike times """ interspike_times = np.diff(t) covar = scipy.stats.variation(interspike_times) return covar def inflexion_spike_detector( v, t, threshold=0.4, indices=False, max_data_points=2000, voltage_threshold=-30 ): """ Computes spike start and stop times based on extent of voltage deflection. 
This function requires some familiarity with Python to understand. :param indices: whether to return tuples of indices for each spike or times :return list of tuples with start and end indices of every AP """ v = smooth(v) voltage_derivative =
np.diff(v)
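# max_min above scans np.diff(a) for +/- sign changes; the same idea in a few
# vectorised lines (toy trace, without the delta and peak_threshold filtering):
import numpy as np

v = np.array([0.0, 1.0, 3.0, 2.0, 0.5, 1.5, 0.2])
g = np.diff(v)
maxima = np.where((g[:-1] > 0) & (g[1:] < 0))[0] + 1
print(maxima)  # indices of local maxima: [2 5]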
#!/usr/bin/env python # Copyright (c) 2019 Intel Corporation # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. """ Summary of useful helper functions for scenarios """ import math import shapely.geometry import shapely.affinity import numpy as np import carla from agents.tools.misc import vector from agents.navigation.local_planner import RoadOption from srunner.scenariomanager.carla_data_provider import CarlaDataProvider def get_distance_along_route(route, target_location): """ Calculate the distance of the given location along the route Note: If the location is not along the route, the route length will be returned """ wmap = CarlaDataProvider.get_map() covered_distance = 0 prev_position = None found = False # Don't use the input location, use the corresponding wp as location target_location_from_wp = wmap.get_waypoint(target_location).transform.location for position, _ in route: location = target_location_from_wp # Don't perform any calculations for the first route point if not prev_position: prev_position = position continue # Calculate distance between previous and current route point interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2) distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2) # Close to the current position? Stop calculation if distance_squared < 0.01: break if distance_squared < 400 and not distance_squared < interval_length_squared: # Check if a neighbor lane is closer to the route # Do this only in a close distance to correct route interval, otherwise the computation load is too high starting_wp = wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while wp is not None: new_location = wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2) + ( (new_location.y - prev_position.y) ** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if new_distance_squared < distance_squared: distance_squared = new_distance_squared location = new_location else: break wp = wp.get_left_lane() wp = starting_wp.get_right_lane() while wp is not None: new_location = wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2) + ( (new_location.y - prev_position.y) ** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if new_distance_squared < distance_squared: distance_squared = new_distance_squared location = new_location else: break wp = wp.get_right_lane() if distance_squared < interval_length_squared: # The location could be inside the current route interval, if route/lane ids match # Note: This assumes a sufficiently small route interval # An alternative is to compare orientations, however, this also does not work for # long route intervals curr_wp = wmap.get_waypoint(position) prev_wp = wmap.get_waypoint(prev_position) wp = wmap.get_waypoint(location) if prev_wp and curr_wp and wp: if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id: # Roads match, now compare the sign of the lane ids if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)): # The location is within the current route interval covered_distance += math.sqrt(distance_squared) found = True break covered_distance += math.sqrt(interval_length_squared) prev_position = position return covered_distance, found def get_crossing_point(actor): """ Get the next crossing point location in front of the ego vehicle @return 
point of crossing """ wp_cross = CarlaDataProvider.get_map().get_waypoint(actor.get_location()) while not wp_cross.is_intersection: wp_cross = wp_cross.next(2)[0] crossing = carla.Location(x=wp_cross.transform.location.x, y=wp_cross.transform.location.y, z=wp_cross.transform.location.z) return crossing def get_geometric_linear_intersection(ego_actor, other_actor): """ Obtain a intersection point between two actor's location by using their waypoints (wp) @return point of intersection of the two vehicles """ wp_ego_1 = CarlaDataProvider.get_map().get_waypoint(ego_actor.get_location()) wp_ego_2 = wp_ego_1.next(1)[0] x_ego_1 = wp_ego_1.transform.location.x y_ego_1 = wp_ego_1.transform.location.y x_ego_2 = wp_ego_2.transform.location.x y_ego_2 = wp_ego_2.transform.location.y wp_other_1 = CarlaDataProvider.get_world().get_map().get_waypoint(other_actor.get_location()) wp_other_2 = wp_other_1.next(1)[0] x_other_1 = wp_other_1.transform.location.x y_other_1 = wp_other_1.transform.location.y x_other_2 = wp_other_2.transform.location.x y_other_2 = wp_other_2.transform.location.y s = np.vstack([(x_ego_1, y_ego_1), (x_ego_2, y_ego_2), (x_other_1, y_other_1), (x_other_2, y_other_2)]) h = np.hstack((s, np.ones((4, 1)))) line1 = np.cross(h[0], h[1]) line2 = np.cross(h[2], h[3]) x, y, z = np.cross(line1, line2) if z == 0: return (float('inf'), float('inf')) intersection = carla.Location(x=x / z, y=y / z, z=0) return intersection def get_location_in_distance(actor, distance): """ Obtain a location in a given distance from the current actor's location. Note: Search is stopped on first intersection. @return obtained location and the traveled distance """ waypoint = CarlaDataProvider.get_map().get_waypoint(actor.get_location()) traveled_distance = 0 while not waypoint.is_intersection and traveled_distance < distance: waypoint_new = waypoint.next(1.0)[-1] traveled_distance += waypoint_new.transform.location.distance(waypoint.transform.location) waypoint = waypoint_new return waypoint.transform.location, traveled_distance def get_location_in_distance_from_wp(waypoint, distance, stop_at_junction=True): """ Obtain a location in a given distance from the current actor's location. Note: Search is stopped on first intersection. @return obtained location and the traveled distance """ traveled_distance = 0 while not (waypoint.is_intersection and stop_at_junction) and traveled_distance < distance: wp_next = waypoint.next(1.0) if wp_next: waypoint_new = wp_next[-1] traveled_distance += waypoint_new.transform.location.distance(waypoint.transform.location) waypoint = waypoint_new else: break return waypoint.transform.location, traveled_distance def get_waypoint_in_distance(waypoint, distance): """ Obtain a waypoint in a given distance from the current actor's location. Note: Search is stopped on first intersection. @return obtained waypoint and the traveled distance """ traveled_distance = 0 while not waypoint.is_intersection and traveled_distance < distance: waypoint_new = waypoint.next(1.0)[-1] traveled_distance += waypoint_new.transform.location.distance(waypoint.transform.location) waypoint = waypoint_new return waypoint, traveled_distance def generate_target_waypoint_list(waypoint, turn=0): """ This method follow waypoints to a junction and choose path based on turn input. 
Turn input: LEFT -> -1, RIGHT -> 1, STRAIGHT -> 0 @returns a waypoint list from the starting point to the end point according to turn input """ reached_junction = False threshold = math.radians(0.1) plan = [] while True: wp_choice = waypoint.next(2) if len(wp_choice) > 1: reached_junction = True waypoint = choose_at_junction(waypoint, wp_choice, turn) else: waypoint = wp_choice[0] plan.append((waypoint, RoadOption.LANEFOLLOW)) # End condition for the behavior if turn != 0 and reached_junction and len(plan) >= 3: v_1 = vector( plan[-2][0].transform.location, plan[-1][0].transform.location) v_2 = vector( plan[-3][0].transform.location, plan[-2][0].transform.location) angle_wp = math.acos( np.dot(v_1, v_2) / abs((np.linalg.norm(v_1) * np.linalg.norm(v_2)))) if angle_wp < threshold: break elif reached_junction and not plan[-1][0].is_intersection: break return plan, plan[-1][0] def generate_target_waypoint_list_multilane(waypoint, change='left', # pylint: disable=too-many-return-statements distance_same_lane=10, distance_other_lane=25, total_lane_change_distance=25, check=True, lane_changes=1, step_distance=2): """ This methods generates a waypoint list which leads the vehicle to a parallel lane. The change input must be 'left' or 'right', depending on which lane you want to change. The default step distance between waypoints on the same lane is 2m. The default step distance between the lane change is set to 25m. @returns a waypoint list from the starting point to the end point on a right or left parallel lane. The function might break before reaching the end point, if the asked behavior is impossible. """ plan = [] plan.append((waypoint, RoadOption.LANEFOLLOW)) # start position option = RoadOption.LANEFOLLOW # Same lane distance = 0 while distance < distance_same_lane: next_wps = plan[-1][0].next(step_distance) if not next_wps: return None, None next_wp = next_wps[0] distance += next_wp.transform.location.distance(plan[-1][0].transform.location) plan.append((next_wp, RoadOption.LANEFOLLOW)) if change == 'left': option = RoadOption.CHANGELANELEFT elif change == 'right': option = RoadOption.CHANGELANERIGHT else: # ERROR, input value for change must be 'left' or 'right' return None, None lane_changes_done = 0 lane_change_distance = total_lane_change_distance / lane_changes # Lane change while lane_changes_done < lane_changes: # Move forward next_wps = plan[-1][0].next(lane_change_distance) if not next_wps: return None, None next_wp = next_wps[0] # Get the side lane if change == 'left': if check and str(next_wp.lane_change) not in ['Left', 'Both']: return None, None side_wp = next_wp.get_left_lane() else: if check and str(next_wp.lane_change) not in ['Right', 'Both']: return None, None side_wp = next_wp.get_right_lane() if not side_wp or side_wp.lane_type != carla.LaneType.Driving: return None, None # Update the plan plan.append((side_wp, option)) lane_changes_done += 1 # Other lane distance = 0 while distance < distance_other_lane: next_wps = plan[-1][0].next(step_distance) if not next_wps: return None, None next_wp = next_wps[0] distance += next_wp.transform.location.distance(plan[-1][0].transform.location) plan.append((next_wp, RoadOption.LANEFOLLOW)) target_lane_id = plan[-1][0].lane_id return plan, target_lane_id def generate_target_waypoint(waypoint, turn=0): """ This method follow waypoints to a junction and choose path based on turn input. 
Turn input: LEFT -> -1, RIGHT -> 1, STRAIGHT -> 0 @returns a waypoint list according to turn input """ sampling_radius = 1 reached_junction = False wp_list = [] while True: wp_choice = waypoint.next(sampling_radius) # Choose path at intersection if not reached_junction and (len(wp_choice) > 1 or wp_choice[0].is_junction): reached_junction = True waypoint = choose_at_junction(waypoint, wp_choice, turn) else: waypoint = wp_choice[0] wp_list.append(waypoint) # End condition for the behavior if reached_junction and not wp_list[-1].is_junction: break return wp_list[-1] def generate_target_waypoint_in_route(waypoint, route): """ This method follow waypoints to a junction @returns a waypoint list according to turn input """ wmap = CarlaDataProvider.get_map() reached_junction = False # Get the route location shortest_distance = float('inf') for index, route_pos in enumerate(route): wp = route_pos[0] trigger_location = waypoint.transform.location dist_to_route = trigger_location.distance(wp) if dist_to_route <= shortest_distance: closest_index = index shortest_distance = dist_to_route route_location = route[closest_index][0] index = closest_index while True: # Get the next route location index = min(index + 1, len(route)) route_location = route[index][0] road_option = route[index][1] # Enter the junction if not reached_junction and (road_option in (RoadOption.LEFT, RoadOption.RIGHT, RoadOption.STRAIGHT)): reached_junction = True # End condition for the behavior, at the end of the junction if reached_junction and (road_option not in (RoadOption.LEFT, RoadOption.RIGHT, RoadOption.STRAIGHT)): break return wmap.get_waypoint(route_location) def choose_at_junction(current_waypoint, next_choices, direction=0): """ This function chooses the appropriate waypoint from next_choices based on direction """ current_transform = current_waypoint.transform current_location = current_transform.location projected_location = current_location + \ carla.Location( x=math.cos(math.radians(current_transform.rotation.yaw)), y=math.sin(math.radians(current_transform.rotation.yaw))) current_vector = vector(current_location, projected_location) cross_list = [] cross_to_waypoint = {} for waypoint in next_choices: waypoint = waypoint.next(10)[0] select_vector = vector(current_location, waypoint.transform.location) cross = np.cross(current_vector, select_vector)[2] cross_list.append(cross) cross_to_waypoint[cross] = waypoint select_cross = None if direction > 0: select_cross = max(cross_list) elif direction < 0: select_cross = min(cross_list) else: select_cross = min(cross_list, key=abs) return cross_to_waypoint[select_cross] def get_intersection(ego_actor, other_actor): """ Obtain a intersection point between two actor's location @return the intersection location """ waypoint = CarlaDataProvider.get_map().get_waypoint(ego_actor.get_location()) waypoint_other = CarlaDataProvider.get_map().get_waypoint(other_actor.get_location()) max_dist = float("inf") distance = float("inf") while distance <= max_dist: max_dist = distance current_location = waypoint.transform.location waypoint_choice = waypoint.next(1) # Select the straighter path at intersection if len(waypoint_choice) > 1: max_dot = -1 * float('inf') loc_projection = current_location + carla.Location( x=math.cos(math.radians(waypoint.transform.rotation.yaw)), y=math.sin(math.radians(waypoint.transform.rotation.yaw))) v_current = vector(current_location, loc_projection) for wp_select in waypoint_choice: v_select = vector(current_location, wp_select.transform.location) 
dot_select = np.dot(v_current, v_select) if dot_select > max_dot: max_dot = dot_select waypoint = wp_select else: waypoint = waypoint_choice[0] distance = current_location.distance(waypoint_other.transform.location) return current_location def detect_lane_obstacle(actor, extension_factor=3, margin=1.02): """ This function identifies if an obstacle is present in front of the reference actor """ world = CarlaDataProvider.get_world() world_actors = world.get_actors().filter('vehicle.*') actor_bbox = actor.bounding_box actor_transform = actor.get_transform() actor_location = actor_transform.location actor_vector = actor_transform.rotation.get_forward_vector() actor_vector =
np.array([actor_vector.x, actor_vector.y])
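# get_geometric_linear_intersection above relies on the homogeneous-coordinate
# identity: the line through two points is their cross product, and two lines
# meet at the cross product of the lines (z == 0 means parallel). Toy check:
import numpy as np

s = np.vstack([(0, 0), (1, 1), (0, 1), (1, 0)])  # two segments crossing at (0.5, 0.5)
h = np.hstack((s, np.ones((4, 1))))
line1 = np.cross(h[0], h[1])
line2 = np.cross(h[2], h[3])
x, y, z = np.cross(line1, line2)
print(x / z, y / z)  # 0.5 0.5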
import time import bisect import numpy as np import pandas as pd import networkx as nx import scipy import scipy.optimize import scipy as sp import os, math import matplotlib.pyplot as plt from joblib import Parallel, delayed from lib.priorityqueue import PriorityQueue from lib.measures import (MeasureList, BetaMultiplierMeasureBySite, UpperBoundCasesBetaMultiplier, UpperBoundCasesSocialDistancing, SocialDistancingForAllMeasure, BetaMultiplierMeasureByType, SocialDistancingPerStateMeasure, SocialDistancingForPositiveMeasure, SocialDistancingForPositiveMeasureHousehold, SocialDistancingByAgeMeasure, SocialDistancingForSmartTracing, ComplianceForAllMeasure, SocialDistancingForKGroups, ComplianceForEssentialWorkers, SocialDistancingForNonEssential, SocialDistancingForSmartTracingHousehold) TO_HOURS = 24.0 class DiseaseModel(object): """ Simulate continuous-time SEIR epidemics with exponentially distributed inter-event times. All units in the simulator are in hours for numerical stability, though disease parameters are assumed to be in units of days as usual in epidemiology """ def __init__(self, mob, distributions, dynamic_tracing=False): """ Init simulation object with parameters Arguments: --------- mob: object of class MobilitySimulator providing mobility data dynamic_tracing: bool If true contacts are computed on-the-fly during launch_epidemic instead of using the previously filled contact array """ # cache settings self.mob = mob self.d = distributions self.dynamic_tracing = dynamic_tracing # parse distributions object self.lambda_0 = self.d.lambda_0 self.gamma = self.d.gamma self.fatality_rates_by_age = self.d.fatality_rates_by_age self.p_hospital_by_age = self.d.p_hospital_by_age self.delta = self.d.delta # parse mobility object self.n_people = mob.num_people self.n_sites = mob.num_sites self.max_time = mob.max_time # special state variables from mob object self.people_age = mob.people_age self.num_age_groups = mob.num_age_groups self.site_type = mob.site_type self.site_dict = mob.site_dict self.num_site_types = mob.num_site_types self.people_household = mob.people_household self.households = mob.households self.social_graph = mob.social_graph assert(self.num_age_groups == self.fatality_rates_by_age.shape[0]) assert(self.num_age_groups == self.p_hospital_by_age.shape[0]) # print self.last_print = time.time() self._PRINT_INTERVAL = 0.1 self._PRINT_MSG = ( 't: {t:.2f} ' '| ' '{maxt:.2f} hrs ' '({maxd:.0f} d)' ) def __print(self, t, force=False): if ((time.time() - self.last_print > self._PRINT_INTERVAL) or force) and self.verbose: print('\r', self._PRINT_MSG.format(t=t, maxt=self.max_time, maxd=self.max_time / 24), sep='', end='', flush=True) self.last_print = time.time() def __init_run(self): """ Initialize the run of the epidemic """ self.queue = PriorityQueue() self.testing_queue = PriorityQueue() ''' State and queue codes (transition event into this state) 'susc': susceptible 'expo': exposed 'ipre': infectious pre-symptomatic 'isym': infectious symptomatic 'iasy': infectious asymptomatic 'posi': tested positive 'nega': tested negative 'resi': resistant 'dead': dead 'hosp': hospitalized 'test': event of i getting a test (transitions to posi if not susc) 'execute_tests': generic event indicating that testing queue should be processed ''' self.legal_states = ['susc', 'expo', 'ipre', 'isym', 'iasy', 'posi', 'nega', 'resi', 'dead', 'hosp'] self.legal_preceeding_state = { 'expo' : ['susc',], 'ipre' : ['expo',], 'isym' : ['ipre',], 'iasy' : ['expo',], 'posi' : ['isym', 'ipre', 'iasy', 
'expo'], 'nega' : ['susc', 'resi'], 'resi' : ['isym', 'iasy'], 'dead' : ['isym',], 'hosp' : ['isym',], } self.state = { 'susc': np.ones(self.n_people, dtype='bool'), 'expo': np.zeros(self.n_people, dtype='bool'), 'ipre': np.zeros(self.n_people, dtype='bool'), 'isym': np.zeros(self.n_people, dtype='bool'), 'iasy': np.zeros(self.n_people, dtype='bool'), 'posi': np.zeros(self.n_people, dtype='bool'), 'nega': np.zeros(self.n_people, dtype='bool'), 'resi': np.zeros(self.n_people, dtype='bool'), 'dead': np.zeros(self.n_people, dtype='bool'), 'hosp': np.zeros(self.n_people, dtype='bool'), } self.state_started_at = { 'susc': - np.inf * np.ones(self.n_people, dtype='float'), 'expo': np.inf * np.ones(self.n_people, dtype='float'), 'ipre': np.inf * np.ones(self.n_people, dtype='float'), 'isym': np.inf * np.ones(self.n_people, dtype='float'), 'iasy': np.inf * np.ones(self.n_people, dtype='float'), 'posi': np.inf *
np.ones(self.n_people, dtype='float')
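# The simulator above draws exponentially distributed inter-event times; a
# hedged, standalone illustration of that sampling step (the rate is a made-up
# placeholder, expressed in 1/hours to match the class's unit convention):
import numpy as np

rng = np.random.default_rng(0)
rate_per_hour = 1.0 / (5.0 * 24.0)   # assumed mean dwell time of 5 days
waiting_times = rng.exponential(1.0 / rate_per_hour, size=3)  # hours until each transition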
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Dec 23 10:35:45 2020 @author: luca """ # !/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Dec 5 22:17:38 2020 @author: luca """ import networkx as nx import numpy as np import matplotlib.pyplot as plt import numpy.random as rnd from scipy.stats import lognorm, gamma, beta from scipy.sparse import lil_matrix as sparse_matrix class Distribution(): def __init__(self): pass def sample(self): pass def sample_sequence(self, n): pass class BetaDistribution(Distribution): def __init__(self, a, b): self.a = a self.b = b def mean(self): return beta.mean(self.a, self.b) def sample(self): return beta.rvs(self.a, self.b) def sample_sequence(self, n): return beta.rvs(self.a, self.b, size=n) class GammaDistribution(Distribution): def __init__(self, shape, scale, loc=0, max_range=None, rounding=True): self.shape = shape self.loc = loc self.scale = scale self.max_range = max_range if rounding: self.post_process = np.round else: self.post_process = lambda x: x def mean(self): return gamma.mean(self.shape, self.loc, self.scale) def sample(self): if self.max_range == None: return self.post_process(gamma.rvs(self.shape, self.loc, self.scale)) else: x = self.max_range + 1 while x > self.max_range: x = self.post_process(gamma.rvs(self.shape, self.loc, self.scale)) return x def sample_sequence(self, n): if self.max_range == None: return np.around(gamma.rvs(self.shape, self.loc, self.scale, size=n)) else: s = np.zeros(n) for i in range(n): s[i] = self.sample() class LognormalDistribution(Distribution): def __init__(self, shape, loc, scale, rounding=True): self.shape = shape self.loc = loc self.scale = scale if rounding: self.post_process = np.round else: self.post_process = lambda x: x def mean(self): return lognorm.mean(self.shape, self.loc, self.scale) def sample(self): return self.post_process(lognorm.rvs(self.shape, self.loc, self.scale)) def sample_sequence(self, n): return np.around(lognorm.rvs(self.shape, self.loc, self.scale, size=n)) class DeterministicDistribution(Distribution): def __init__(self, value): self.value = value def mean(self): return self.value def sample(self): return self.value def sample_sequence(self, n): return self.value * np.ones(n) class PoissonDistribution(Distribution): def __init__(self, mean): self.mean = mean def sample(self): return np.random.poisson(self.mean) def sample_sequence(self, n): return np.random.poisson(self.mean, n) class CategoricalDistribution(Distribution): def __init__(self, categories, probabilities): self.categories = categories self.probabilities = np.array(probabilities) def sample(self): return np.random.choice(self.categories, p=self.probabilities) def sample_sequence(self, n): return np.random.choice(self.categories, n, p=self.probabilities) class DeterministicCategoricalDistribution(Distribution): def __init__(self, category): self.category = category def sample(self): return self.category def sample_sequence(self, n): return [self.category for i in range(n)] class DiscreteDistribution(Distribution): def __init__(self, counts, max_range, name): self.counts = counts self.max_range = max_range self.values = np.arange(max_range + 1) self.name = name self.total = np.sum(counts) self.probabilities = counts / self.total def mean(self): return np.dot(self.values, self.probabilities) def sample(self): return np.random.choice(self.values, p=self.probabilities) def sample_sequence(self, n): return np.random.choice(self.values, n, p=self.probabilities) def conditional(self, lower, 
upper): """ Conditions the sampling to be included in [lower, upper] """ if lower > self.max_range: raise Exception("Conditioning not allowed, lower bound exceeds distribution range") if lower == 0 and upper == np.inf: self.probabilities = self.counts / self.total else: mask = np.zeros(self.max_range + 1) for i in range(lower, upper + 1): mask[i] = 1 self.probabilities = self.counts * mask / np.sum(self.counts * mask) class State(): """ The state of an agent; this class also contains the change dynamics and age information. model: has to be a StateTransitionModel """ def __init__(self, model): self.model = model self.reset_state() def reset_state(self): self.state = self.model.initial_state.sample() self.update_age() def sample_state(self, distribution): self.state = distribution.sample() self.update_age() def is_susceptible(self): return self.model.susceptible[self.state] def is_infective(self): return self.model.infective[self.state] def update_age_needed(self): return self.model.has_age[self.state] def update(self, infected=False): if infected and self.is_susceptible(): self.change_state() return 1 elif self.update_age_needed(): self.age -= 1 if self.age == 0: self.change_state() return 0 def is_state(self, state): return self.state == state def get_state(self): return self.state def change_state(self): self.state = self.model.next_state[self.state].sample() self.update_age() def update_age(self): if self.update_age_needed(): self.age = self.model.age_distribution[self.state].sample() else: self.age = 0 class StateTransitionModel(): def __init__(self, states=["S", "I", "R"], susceptible={"S": True, "I": False, "R": False}, infective={"S": False, "I": True, "R": False}, has_age={"S": False, "I": True, "R": False}, age_distribution={"I": DeterministicDistribution(2)}, next_state={"S": DeterministicCategoricalDistribution("I"), "I": DeterministicCategoricalDistribution("R")}, initial_state=CategoricalDistribution(["S", "I", "R"], [0.95, 0.05, 0.0])): self.states = states self.susceptible = susceptible self.infective = infective self.has_age = has_age self.age_distribution = age_distribution self.next_state = next_state self.initial_state = initial_state # define edges with -log(1-infection_prob) -> the higher the value, the higher the probability of contagion # define an event probability for each node, sampled from a discrete distribution over 0, 1/30, 1/15, 1/7, 3/7, with probability 0.2 each # the infection probability is Beta(2, 38) # in the simulation, for each node decide whether it stays, generate the new network, and for each susceptible compute the sum of the infection probabilities class Network(): def __init__(self, node_number, state_model, main_degree_distribution, event_degree_distribution, event_probability, infection_probability_distribution): self.node_number = node_number self.model = state_model self.main_degree_distribution = main_degree_distribution self.event_degree_distribution = event_degree_distribution self.event_probability = event_probability self.infection_probability_distribution = infection_probability_distribution self.generate_random_network() self.nodes = self.network.nodes self.event_network = None def generate_random_network(self): """ Generates a random network with given degree distribution """ degrees = self.main_degree_distribution.sample_sequence(self.node_number) self.network = nx.expected_degree_graph(degrees, seed=None, selfloops=False) for n in self.network.nodes: self.network.nodes[n]['state'] = State(self.model) self.network.nodes[n]['event_prob'] = self.event_probability.sample() for e in
self.network.edges: self.network[e[0]][e[1]]['p'] = self.infection_probability_distribution.sample() self.network[e[0]][e[1]]['mlogp'] = -np.log(self.network[e[0]][e[1]]['p']) def sample_event_network(self): # sample subset of nodes, by sampling a degree sequence with several zeros degrees =
np.zeros(self.node_number, dtype=np.int16)
numpy.zeros
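The record above defines the building blocks but never wires them together. A minimal usage sketch (my addition, not part of the original file) following the recipe in the translated comments: event probabilities from a discrete distribution over {0, 1/30, 1/15, 1/7, 3/7} with probability 0.2 each, infection probabilities from Beta(2, 38); the degree distributions are illustrative choices.

import numpy as np

model = StateTransitionModel()  # default S -> I -> R dynamics
main_deg = GammaDistribution(2.0, 5.0, max_range=50)    # illustrative
event_deg = GammaDistribution(2.0, 2.0, max_range=20)   # illustrative
event_prob = CategoricalDistribution([0, 1/30, 1/15, 1/7, 3/7], [0.2] * 5)
infect_prob = BetaDistribution(2, 38)

net = Network(node_number=200, state_model=model,
              main_degree_distribution=main_deg,
              event_degree_distribution=event_deg,
              event_probability=event_prob,
              infection_probability_distribution=infect_prob)
print(len(net.nodes), "nodes,", net.network.number_of_edges(), "edges")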
import numpy as np def l2_regularization(W, reg_strength): ''' Computes L2 regularization loss on weights and its gradient Arguments: W, np array - weights reg_strength - float value Returns: loss, single value - l2 regularization loss gradient, np.array same shape as W - gradient of weight by l2 loss ''' # TODO: implement l2 regularization and gradient # Your final implementation shouldn't have any loops # regularization_strength * sum_ij W[i, j]^2 # loss = reg_strength * np.trace(np.dot(W.T, W)) # L2(W) = λ * tr(W.T * W) loss = reg_strength * np.sum(W * W) grad = 2 * reg_strength * W # dL2(W)/dW = 2 * λ * W return loss, grad def softmax(_predictions): ''' Computes probabilities from scores Arguments: predictions, np array, shape is either (N) or (batch_size, N) - classifier output Returns: probs, np array of the same shape as predictions - probability for every class, 0..1 ''' # TODO implement softmax # Your final implementation shouldn't have any loops predictions = _predictions.copy() if len(predictions.shape) == 1: predictions -= np.max(predictions) # , axis=1)[:,None] values = np.exp(predictions) probs = values / np.sum(values) # , axis=1)[:, None] else: predictions -= np.max(predictions, axis=1)[:, None] values = np.exp(predictions) probs = values / np.sum(values, axis=1)[:, None] return probs def cross_entropy_loss(probs, target_index): ''' Computes cross-entropy loss Arguments: probs, np array, shape is either (N) or (batch_size, N) - probabilities for every class target_index: np array of int, shape is (1) or (batch_size) - index of the true class for given sample(s) Returns: loss: single value ''' # TODO implement cross-entropy # Your final implementation shouldn't have any loops old_result = cross_entropy_loss_old(probs, target_index) if isinstance(target_index, int) or len(probs.shape) == 1: return -np.log(probs[target_index]) else: target_probs = probs[np.arange(len(target_index)), target_index.flatten()] value = -np.log(target_probs) result = np.mean(value) assert old_result == result return result def cross_entropy_loss_old(probs, target_index): ''' Computes cross-entropy loss Arguments: probs, np array, shape is either (N) or (batch_size, N) - probabilities for every class target_index: np array of int, shape is (1) or (batch_size) - index of the true class for given sample(s) Returns: loss: single value ''' rows = np.arange(target_index.shape[0]) cols = target_index return np.mean(-np.log(probs[rows, cols])) # L def softmax_with_cross_entropy(predictions, target_index): ''' Computes softmax and cross-entropy loss for model predictions, including the gradient Arguments: predictions, np array, shape is either (N) or (batch_size, N) - classifier output target_index: np array of int, shape is (1) or (batch_size) - index of the true class for given sample(s) Returns: loss, single value - cross-entropy loss dprediction, np array same shape as predictions - gradient of predictions by loss value ''' # TODO implement softmax with cross-entropy probs = softmax(predictions) loss = cross_entropy_loss(probs, target_index) # Your final implementation shouldn't have any loops dprediction = probs.copy() if len(predictions.shape) == 1: dprediction[target_index] -= 1 # dL/dZ = (S - 1(y)) else: dprediction[np.arange(len(dprediction)), target_index.flatten()] -= 1 dprediction = dprediction / target_index.shape[0] return loss, dprediction class Param: """ Trainable parameter of the model Captures both parameter value and the gradient """ def __init__(self, value): self.value = value self.grad
= np.zeros_like(value) def reset_grad(self): self.grad = np.zeros_like(self.value) def __str__(self) -> str: super().__str__() return f'value: {self.value}, gradient: {self.grad}' def ReLU(X): return (X + np.abs(X)) / 2 class ReLULayer: def __init__(self): self.positive = None self.x = None pass def forward(self, X): # TODO: Implement forward pass # Hint: you'll need to save some information about X # to use it later in the backward pass self.x = X self.mask = (X > 0) # result = ReLU(X) return X * self.mask def backward(self, d_out): """ Backward pass Arguments: d_out, np array (batch_size, num_features) - gradient of loss function with respect to output Returns: d_result: np array (batch_size, num_features) - gradient with respect to input """ # TODO: Implement backward pass # Your final implementation shouldn't have any loops d_result = self.mask * d_out return d_result def params(self): # ReLU Doesn't have any parameters return {} class FullyConnectedLayer: def __init__(self, n_input, n_output): self.W = Param(0.001 * np.random.randn(n_input, n_output)) self.B = Param(0.001 * np.random.randn(1, n_output)) self.X = None def forward(self, X): # TODO: Implement forward pass # Your final implementation shouldn't have any loops self.X = X.copy() result = np.dot(X, self.W.value) + self.B.value return result def backward(self, d_out): """ Backward pass Computes gradient with respect to input and accumulates gradients within self.W and self.B Arguments: d_out, np array (batch_size, n_output) - gradient of loss function with respect to output Returns: d_result: np array (batch_size, n_input) - gradient with respect to input """ # TODO: Implement backward pass # Compute both gradient with respect to input # and gradients with respect to W and B # Add gradients of W and B to their `grad` attribute # It should be pretty similar to linear classifier from # n_input, n_output # X = (batch_size, input_features) # batch_size, n_output # the previous assignment dw = np.dot(self.X.T, d_out) self.W.grad += dw E =
np.ones(shape=(1, self.X.shape[0]))
numpy.ones
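A quick numeric self-check (my own sketch, not from the assignment file) that the analytic gradient returned by softmax_with_cross_entropy matches a central finite difference on one coordinate:

import numpy as np

np.random.seed(0)
preds = np.random.randn(3, 4)
target = np.array([0, 2, 1])

loss, dpred = softmax_with_cross_entropy(preds, target)

eps = 1e-5
probe = preds.copy()
probe[1, 2] += eps
loss_plus, _ = softmax_with_cross_entropy(probe, target)
probe[1, 2] -= 2 * eps
loss_minus, _ = softmax_with_cross_entropy(probe, target)

numeric = (loss_plus - loss_minus) / (2 * eps)
assert abs(numeric - dpred[1, 2]) < 1e-6  # analytic and numeric gradients agree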
""" The Pshpere module contains a class named PShpere which allows the user to generate synthetic porous media, and to get information about that porous media A user can instantiate and use the PSphere() object as follows: >>> from lb_colloids import PSphere >>> img = Psphere(dimension=200, radius=20, porosity=0.375, sensitivity=0.01) >>> # hydraulic radius can be calculated >>> rh = img.calculate_hydraulic_radius(resolution=1e-06) >>> # to get a copy of the porous media use >>> matrix = img.matrix >>> # save the image >>> img.save("test_image.png") """ import numpy as np import random import math from PIL import Image import matplotlib.pyplot as plt class PSphere(object): """ Pshpere is a class that allows for the automated generation of synthetic porous media in two-dimensions. This approach can be expanded to three dimensions with some effort. Parameters: ---------- :param int radius: grain size radius :param float porosity: target porosity for porous media :param int dimension: the x and y dimension in pixels for the domain :param float sensitivity: a porosity sensitivity target. This is the allowable range of error for PShpere """ def __init__(self, radius=20, porosity=0.5, dimension=256, sensitivity=0.08): self.radius = radius self.porosity = porosity self.sensitivity = sensitivity self.dimension = dimension self.matrix = np.ones((dimension, dimension), dtype=bool) self.matrix_porosity = 0. self.matrix_rh = 0. self.particle_space = False self.pore_space = True self.percolates = False good = False while not good: self.generate_plane() self.check_percolation() self.check_porosity() print(self.matrix_porosity) if abs(self.matrix_porosity - self.porosity) <= self.sensitivity: if self.percolates: good = True else: print("Regenerating porous media") self.matrix = np.ones((dimension, dimension), dtype=bool) # self.percolates = False def get_matrix(self): matrix = np.invert(self.matrix) return matrix.astype(bool) def generate_plane(self): """ Main method used to generate a porous media plane by PSphere, this should not be called by the user """ porosity = self.porosity slice_location = self.dimension / 2 low_bound = slice_location - int(self.radius) up_bound = slice_location + int(self.radius) if low_bound <= 0 or up_bound > self.dimension: raise AssertionError("Radius to large or slice location incorrect") relative_radius = self.radius / float(self.dimension) number_of_spheres = self.iround(-3.0 * np.log(self.porosity) / (4 * np.pi * relative_radius ** 3)) for i in range(number_of_spheres): z = 1 + random.uniform(0, self.dimension) if up_bound > z > low_bound: x = 1 + int(random.uniform(0, self.dimension)) y = 1 + int(random.uniform(0, self.dimension)) slice_distance = abs(z - slice_location) slice_radius = np.sqrt(self.radius ** 2 - slice_distance ** 2) - 0.5 if slice_radius < 0 or
np.isnan(slice_radius)
numpy.isnan
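A sanity check (my addition) of the sphere-count formula used in generate_plane: for a Boolean model of randomly placed spheres, porosity ≈ exp(-N * V_sphere) with volumes in units of the domain volume, so N = -3 ln(phi) / (4 pi r^3) for relative radius r.

import numpy as np

phi = 0.375
r = 20 / 200.0                                 # radius / dimension
N = -3.0 * np.log(phi) / (4 * np.pi * r ** 3)
print(round(N), "spheres expected for porosity", phi)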
import cv2 import numpy as np import time import os import pickle import copy from tqdm import tqdm import keras def preprocess(img): cnt, heir = cv2.findContours(img[:,:,0],cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) if (len(cnt)>0): x,y,w,h = cv2.boundingRect(cnt[0]) temp = img[y:y+h, x:x+w, 0] w1 = (h*3)//4 temp = cv2.copyMakeBorder(temp, 0,0, max(w1-w,0)//2, max(w1-w,0)//2, cv2.BORDER_CONSTANT, (0,0,0)) return cv2.resize(temp, (120,160)) else: return np.zeros((160,120)) def get_feature_vectors(imgs, k=10, preproc=None, eigvec=None, eigvalue=None): if preproc is None: preproc = preprocess G = np.vstack(tuple(preproc(img).reshape(-1).astype(np.float64)/255. for img in imgs)) avg = G.T.mean(axis=1) A = G.T-avg.reshape(-1,1) info = {} if eigvec is None: X = A.T@A / A.shape[1] eigvalue, eigvec = np.linalg.eigh(X) info['eigvalue'] = eigvalue info['eigvec'] = eigvec else: assert eigvalue is not None U = A@eigvec u = U/np.linalg.norm(U, axis=0) u_sorted = u[:,np.argsort(eigvalue)[::-1]] u_k = u_sorted[:,0:k] W = u_k.T@A A1 = u_k@W return W, A1, A, u_k, avg, info class KMeans(): def __init__(self, K = 16, debug=False): self.K = K self.debug = debug self.states = [[] for _ in range(self.K)] self.P = np.zeros((10,self.K)) def fit(self,W): self.W = W for j in range(self.W.shape[1]): self.states[int(j*self.K/self.W.shape[1])].append(j) for i, state in enumerate(self.states): self.P[:,i] = self.W[:,state].mean(axis=1) self.converge() def converge(self): changes = 0 while changes < 5 : prev_states = copy.deepcopy(self.states) # deep copy: a plain alias would silently track the in-place updates below for i, state in enumerate(self.states): self.P[:,i] = self.W[:,state].mean(axis=1) temp_states = [] for i in range(self.K): temp_state = [] for j in [(i-1)%self.K, (i+1)%self.K]: if i == j: continue for state in self.states[j]: if np.linalg.norm(self.W[:,state]-self.P[:,i]) <= np.linalg.norm(self.W[:,state]-self.P[:,j]): if self.debug : print(i,j) temp_state.append([j,state]) temp_states.append(temp_state) for i, temp_state in enumerate(temp_states): temp_state.sort(key=lambda x : -x[1] if x[0]<i else x[0]) for clst, frm in temp_state: if clst<i: if self.states[clst][-1] == frm and len(self.states[clst])>1: self.states[clst].remove(frm) else: temp_states[i].remove([clst,frm]) else: if self.states[clst][0] == frm and len(self.states[clst])>1: self.states[clst].remove(frm) else: temp_states[i].remove([clst,frm]) for i in range(len(self.states)): for _, x in temp_states[i]: self.states[i].append(x) self.states[i].sort() if self.states != prev_states: changes = 0 else: changes += 1 def predict(self, W_t): MV = np.array(tuple(
np.linalg.norm(W_t - self.P[:,i:i+1],axis=0)
numpy.linalg.norm
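Usage sketch (mine, with random stand-in frames) for the eigen-decomposition step above; the lambda assumes the frames already have the 160x120 target size, standing in for the full preprocess:

import numpy as np

frames = [np.random.randint(0, 256, (160, 120, 3), dtype=np.uint8) for _ in range(40)]
take_gray = lambda img: img[:, :, 0]               # hypothetical stand-in for preprocess

W, A1, A, u_k, avg, info = get_feature_vectors(frames, k=10, preproc=take_gray)
print(W.shape)                                     # (10, 40): k coefficients per frame
print(np.linalg.norm(A - A1) / np.linalg.norm(A))  # relative rank-k reconstruction error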
import numpy as np from typing import List, Dict class CorrelationResult: pearson: np.ndarray pearson_p: np.ndarray spearman: np.ndarray spearman_p: np.ndarray normality: np.ndarray def __init__(self, inputs_len, outputs_len): self.pearson = np.full((inputs_len, outputs_len), np.nan, np.float32) self.pearson_p = np.full((inputs_len, outputs_len), np.nan, np.float32) self.spearman = np.full((inputs_len, outputs_len), np.nan, np.float32) self.spearman_p =
np.full((inputs_len, outputs_len), np.nan, np.float32)
numpy.full
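A filling sketch (my own, assuming the truncated constructor completes analogously): pair each input column with each output column via scipy.stats.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
inputs = rng.normal(size=(50, 3))
outputs = rng.normal(size=(50, 2))

res = CorrelationResult(inputs.shape[1], outputs.shape[1])
for i in range(inputs.shape[1]):
    for j in range(outputs.shape[1]):
        res.pearson[i, j], res.pearson_p[i, j] = stats.pearsonr(inputs[:, i], outputs[:, j])
        res.spearman[i, j], res.spearman_p[i, j] = stats.spearmanr(inputs[:, i], outputs[:, j])
print(res.pearson.round(2))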
import cv2, argparse import numpy as np def makeCartoon(original): # Make a copy of the original image to work with img = np.copy(original) # Convert image to grayscale imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Apply gaussian filter to the grayscale image imgGray = cv2.GaussianBlur(imgGray, (3,3), 0) # Detect edges in the image and threshold it edges = cv2.Laplacian(imgGray, cv2.CV_8U, ksize=5) edges = 255 - edges ret, edgeMask = cv2.threshold(edges, 150, 255, cv2.THRESH_BINARY) # Apply Edge preserving filter to get the heavily blurred image imgBilateral = cv2.edgePreservingFilter(img, flags=2, sigma_s=50, sigma_r=0.4) # Create an output matrix output = np.zeros(imgGray.shape) # Combine the cartoon and edges output = cv2.bitwise_and(imgBilateral, imgBilateral, mask=edgeMask) return output def clarendon(original): img = np.copy(original) # Separate the channels bChannel = img[:,:,0] gChannel = img[:,:,1] rChannel = img[:,:,2] # Specifying the x-axis for mapping xValues = np.array([0, 28, 56, 85, 113, 141, 170, 198, 227, 255]) # Specifying the y-axis for different channels rCurve = np.array([0, 16, 35, 64, 117, 163, 200, 222, 237, 249 ]) gCurve = np.array([0, 24, 49, 98, 141, 174, 201, 223, 239, 255 ]) bCurve = np.array([0, 38, 66, 104, 139, 175, 206, 226, 245, 255 ]) # Creating the LUT to store the interpolated mapping fullRange = np.arange(0,256) bLUT = np.interp(fullRange, xValues, bCurve ) gLUT = np.interp(fullRange, xValues, gCurve ) rLUT = np.interp(fullRange, xValues, rCurve ) # Applying the mapping to the image using LUT bChannel = cv2.LUT(bChannel, bLUT) gChannel = cv2.LUT(gChannel, gLUT) rChannel = cv2.LUT(rChannel, rLUT) # Converting back to uint8 img[:,:,0] = np.uint8(bChannel) img[:,:,1] = np.uint8(gChannel) img[:,:,2] = np.uint8(rChannel) return img def adjustSaturation(original, saturationScale = 1.0): img = np.copy(original) # Convert to HSV color space hsvImage = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) # Convert to float32 hsvImage = np.float32(hsvImage) # Split the channels H, S, V = cv2.split(hsvImage) # Multiply S channel by scaling factor S = np.clip(S * saturationScale , 0, 255) # Merge the channels and show the output hsvImage = np.uint8( cv2.merge([H, S, V]) ) imSat = cv2.cvtColor(hsvImage, cv2.COLOR_HSV2BGR) return imSat def moon(original): img = np.copy(original) # Specifying the x-axis for mapping origin = np.array([0, 15, 30, 50, 70, 90, 120, 160, 180, 210, 255 ]) # Specifying the y-axis for mapping Curve = np.array([0, 0, 5, 15, 60, 110, 150, 190, 210, 230, 255 ]) # Creating the LUT to store the interpolated mapping fullRange = np.arange(0,256) LUT = np.interp(fullRange, origin, Curve ) # Applying the mapping to the L channel of the LAB color space labImage = cv2.cvtColor(img,cv2.COLOR_BGR2LAB) labImage[:,:,0] = cv2.LUT(labImage[:,:,0], LUT) img = cv2.cvtColor(labImage,cv2.COLOR_LAB2BGR) # Desaturating the image img = adjustSaturation(img,0.01) return img def adjustContrast(original, scaleFactor): img = np.copy(original) # Convert to YCrCb color space ycbImage = cv2.cvtColor(img,cv2.COLOR_BGR2YCrCb) # Convert to float32 since we will be doing multiplication operation ycbImage = np.float32(ycbImage) # Split the channels Ychannel, Cr, Cb = cv2.split(ycbImage) # Scale the Ychannel Ychannel = np.clip(Ychannel * scaleFactor , 0, 255) # Merge the channels and show the output ycbImage = np.uint8( cv2.merge([Ychannel, Cr, Cb]) ) img = cv2.cvtColor(ycbImage, cv2.COLOR_YCrCb2BGR) return img def applyVignette(original, vignetteScale): img = np.copy(original) # convert to
float img = np.float32(img) rows,cols = img.shape[:2] # Compute the kernel size from the image dimensions k = np.min(img.shape[:2])/vignetteScale # Create a kernel to get the halo effect kernelX = cv2.getGaussianKernel(cols,k) kernelY = cv2.getGaussianKernel(rows,k) # generating vignette mask using Gaussian kernels kernel = kernelY * kernelX.T # Normalize the kernel mask = 255 * kernel / np.linalg.norm(kernel) mask = cv2.GaussianBlur(mask, (51,51), 0) # Apply the halo to all the channels of the image img[:,:,0] += img[:,:,0]*mask img[:,:,1] += img[:,:,1]*mask img[:,:,2] += img[:,:,2]*mask img =
np.clip(img/2, 0, 255)
numpy.clip
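A small driver (my addition; 'input.jpg' is a placeholder path) applying the filters above and writing the results to disk:

img = cv2.imread("input.jpg")
if img is None:
    raise FileNotFoundError("input.jpg not found")
cv2.imwrite("cartoon.jpg", makeCartoon(img))
cv2.imwrite("clarendon.jpg", clarendon(img))
cv2.imwrite("moon.jpg", moon(img))
cv2.imwrite("vignette.jpg", np.uint8(applyVignette(img, 3)))  # cast: vignette returns float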
""" Parses the python AST below, transforms it to C, JITs it, and runs it. """ #logging.basicConfig(level=10) import ctypes as ct import numpy as np from ctree.c.nodes import * from ctree.nodes import Project from ctree.transformations import * from ctree.jit import LazySpecializedFunction from ctree.jit import ConcreteSpecializedFunction # from ctypes import CFUNCTYPE # --------------------------------------------------------------------------- # Specializer code class OpTranslator(LazySpecializedFunction): def args_to_subconfig(self, args): """ Analyze arguments and return a 'subconfig', a hashable object that classifies them. Arguments with identical subconfigs might be processed by the same generated code. """ A = args[0] return { 'ptr':
np.ctypeslib.ndpointer(A.dtype, A.ndim, A.shape)
numpy.ctypeslib.ndpointer
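Why ndpointer works as a hashable subconfig key (my illustration, not part of the original specializer): numpy caches the generated pointer classes, so arrays with identical dtype/ndim/shape map to the very same type object, and arguments with the same classification can share generated code.

import numpy as np

a = np.zeros((4, 4), dtype=np.float64)
b = np.ones((4, 4), dtype=np.float64)
ta = np.ctypeslib.ndpointer(a.dtype, a.ndim, a.shape)
tb = np.ctypeslib.ndpointer(b.dtype, b.ndim, b.shape)
assert ta is tb   # same cached class -> same specialization cache entry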
import matplotlib.pyplot as plt import numpy as np from scipy.optimize import curve_fit from astropy.io import ascii from uncertainties import ufloat import uncertainties.unumpy as unp # so far: calculation of the speed of sound from our measurement with v_0 and the determined lambda # in addition: import of all the data via the Holz method; the arrays for the # difference are each ready and can now be plotted against the gear (possibly # also against the determined velocity v of the cart) # placeholder for the plots, here simply the arrays Gang = np.linspace(1, 10, 10) # insert for forward and reverse gear def nomvalues_array(array): List = list() for i in range(len(array)): List.append(array[i].nominal_value) array_noms = np.asarray(List) return array_noms # short function for generating arrays with nominal values # a) puls = np.genfromtxt( "Messdaten/adrianundclemens/adrianclemens_messunga.txt", unpack=True) n6h = ufloat(np.mean(puls[0:5]), np.std(puls[0:5]) / np.sqrt(5)) n6z = ufloat(np.mean(puls[5:10]), np.std(puls[5:10]) / np.sqrt(5)) n12h = ufloat(np.mean(puls[10:15]), np.std(puls[10:15]) / np.sqrt(5)) n12z = ufloat(np.mean(puls[15:20]), np.std(puls[15:20]) / np.sqrt(5)) n18h = ufloat(np.mean(puls[20:25]), np.std(puls[20:25]) / np.sqrt(5)) n18z = ufloat(np.mean(puls[25:30]), np.std(puls[25:30]) / np.sqrt(5)) n24h = ufloat(np.mean(puls[30:35]), np.std(puls[30:35]) / np.sqrt(5)) n24z = ufloat(np.mean(puls[35:40]), np.std(puls[35:40]) / np.sqrt(5)) n30h = ufloat(np.mean(puls[40:45]), np.std(puls[40:45]) / np.sqrt(5)) n30z = ufloat(np.mean(puls[45:50]), np.std(puls[45:50]) / np.sqrt(5)) n36h = ufloat(np.mean(puls[50:55]), np.std(puls[50:55]) / np.sqrt(5)) n36z = ufloat(np.mean(puls[55:60]), np.std(puls[55:60]) / np.sqrt(5)) n42h = ufloat(np.mean(puls[60:65]), np.std(puls[60:65]) / np.sqrt(5)) n42z = ufloat(np.mean(puls[65:70]), np.std(puls[65:70]) / np.sqrt(5)) n48h = ufloat(np.mean(puls[70:75]), np.std(puls[70:75]) / np.sqrt(5)) n48z = ufloat(np.mean(puls[75:80]), np.std(puls[75:80]) / np.sqrt(5)) n54h = ufloat(np.mean(puls[80:85]), np.std(puls[80:85]) / np.sqrt(5)) n54z = ufloat(np.mean(puls[85:90]), np.std(puls[85:90]) / np.sqrt(5)) n60h = ufloat(np.mean(puls[90:95]), np.std(puls[90:95]) / np.sqrt(5)) n60z = ufloat(np.mean(puls[95:100]), np.std(puls[95:100]) / np.sqrt(5)) List = [n6h, n6z, n12h, n12z, n18h, n18z, n24h, n24z, n30h, n30z, n36h, n36z, n42h, n42z, n48h, n48z, n54h, n54z, n60h, n60z] pulse = unp.uarray([n6h.n, n6z.n, n12h.n, n12z.n, n18h.n, n18z.n, n24h.n, n24z.n, n30h.n, n30z.n, n36h.n, n36z.n, n42h.n, n42z.n, n48h.n, n48z.n, n54h.n, n54z.n, n60h.n, n60z.n], [ n6h.s, n6z.s, n12h.s, n12z.s, n18h.s, n18z.s, n24h.s, n24z.s, n30h.s, n30z.s, n36h.s, n36z.s, n42h.s, n42z.s, n48h.s, n48z.s, n54h.s, n54z.s, n60h.s, n60z.s]) pulse_vor = [n6h, n12h, n18h, n24h, n30h, n36h, n42h, n48h, n54h, n60h] pulse_rueck = [-n60z, -n54z, -n48z, -n42z, -n36z, -n30z, -n24z, -n18z, -n12z, -n6z] pulse_vor_noms = nomvalues_array(pulse_vor) pulse_rueck_noms = nomvalues_array(pulse_rueck) pulse_vor_noms = pulse_vor_noms.tolist() pulse_rueck_noms = pulse_rueck_noms.tolist() pulse_noms = pulse_rueck_noms + pulse_vor_noms pulse_noms = np.asarray(pulse_noms) print(pulse_noms) s = 0.2 t = (10**(-4) * pulse_noms) pace_noms = s / ((10**(-4)) * pulse_noms) v = pace_noms s = 0.2 t = (10**(-4) * pulse) pace = s / (10**(-4) * pulse) print("Velocity", v) ascii.write([List, t, pace], 'Messdaten/pace.tex', format='latex') print("v", v) v_lp = np.linspace(-0.6, 0.6) # end of a) ######### # b)
calculated with our data entartung, a, diff = np.genfromtxt("Messdaten/b.txt", unpack=True) diff = diff[1:6] ascii.write([diff, diff * 2], 'Messdaten/entartung_wir.tex', format='latex') diff = diff * 2 # in mm diff = diff * 10**(-3) wellenlaenge = ufloat(np.mean(diff), np.std(diff, ddof=1) / np.sqrt(len(diff))) print("Wavelength", wellenlaenge) messpunkt, vnull = np.genfromtxt( "Messdaten/unsereMessdaten_v_null.txt", unpack=True) ascii.write([messpunkt, vnull], 'Messdaten/v_null_wir.tex', format='latex') v_null = ufloat(np.mean(vnull), np.std(vnull, ddof=1) / np.sqrt(len(vnull))) c = wellenlaenge * v_null print("Rest frequency", v_null) print("Speed of sound =", c) v_null_div_c = v_null / c eins_div_lambda = 1 / wellenlaenge print("Required quantity (line parameters later) =", v_null_div_c, eins_div_lambda) ###################################### # calculating diff ################################# # you can simply delete this once you have calculated the cart velocity for # forward and backward. Best to give the array the same name, then nothing needs to change below. ########################################### a, vnull_adrianclemens = np.genfromtxt( "Messdaten/adrianundclemens/clemensadrian_v_null.txt", unpack=True) # the measured frequency has to be converted each time vnull_adrianclemens = vnull_adrianclemens * 5 / 4 vnull_ac = ufloat(np.mean(vnull_adrianclemens), np.std( vnull_adrianclemens, ddof=1) / np.sqrt(len(vnull_adrianclemens))) print(vnull_ac) vq = np.genfromtxt( "Messdaten/adrianundclemens/adrian_clemens_d.txt", unpack=True) vq = vq * 5 / 4 g_quelle1vor = ufloat(np.mean(vq[0:5]), np.std( vq[0:5], ddof=1) / np.sqrt(len(vq[0:5]))) g_quelle1rueck = ufloat(np.mean(vq[5:10]), np.std( vq[5:10], ddof=1) / np.sqrt(len(vq[5:10]))) g_quelle2vor = ufloat(np.mean(vq[10:15]), np.std( vq[10:15], ddof=1) / np.sqrt(len(vq[10:15]))) g_quelle2rueck = ufloat(np.mean(vq[15:20]), np.std( vq[15:20], ddof=1) / np.sqrt(len(vq[15:20]))) g_quelle3vor = ufloat(np.mean(vq[20:25]), np.std( vq[20:25], ddof=1) / np.sqrt(len(vq[20:25]))) g_quelle3rueck = ufloat(np.mean(vq[25:30]), np.std( vq[25:30], ddof=1) / np.sqrt(len(vq[25:30]))) g_quelle4vor = ufloat(np.mean(vq[30:35]), np.std( vq[30:35], ddof=1) / np.sqrt(len(vq[30:35]))) g_quelle4rueck = ufloat(np.mean(vq[35:40]), np.std( vq[35:40], ddof=1) / np.sqrt(len(vq[35:40]))) g_quelle5vor = ufloat(np.mean(vq[40:45]), np.std( vq[40:45], ddof=1) / np.sqrt(len(vq[40:45]))) g_quelle5rueck = ufloat(
np.mean(vq[45:50])
numpy.mean
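The mean/standard-error pattern repeated above, in isolation (my sketch): each block of five readings becomes a ufloat carrying the mean and the standard error of the mean.

import numpy as np
from uncertainties import ufloat

series = np.array([4.9, 5.1, 5.0, 5.2, 4.8])
val = ufloat(np.mean(series), np.std(series, ddof=1) / np.sqrt(len(series)))
print(val)   # approx. 5.00+/-0.07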
import numpy as np import math import matplotlib.pyplot as plt f=2 fs=100 N=6 A = 4 n=20 t = np.arange(0,N/f,1.0/fs) s1_t = lambda t : A*
np.sin(2*math.pi*f*t)
numpy.sin
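A continuation sketch (mine) that evaluates and plots the sampled sinusoid defined above:

s1 = s1_t(t)        # sample A*sin(2*pi*f*t) at fs over N/f seconds
plt.stem(t, s1)
plt.xlabel("t [s]")
plt.ylabel("s1(t)")
plt.show()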
from PIL import Image import numpy as np from jax import numpy as jnp from util.image import gradient, gradient_to_img, laplace, laplacian_to_img from abc import ABC, abstractmethod def get_data_loader_cls_by_type(type): if type == "normal": return NormalImageLoader elif type == "gradient": return GradientImageLoader elif type == "laplacian": return LaplacianImageLoader elif type == "combined": return CombinedImageLoader raise ValueError("Wrong data loader type: {}".format(type)) class BaseImageLoader(ABC): def __init__(self, img_path, num_channels, size=0, batch_size=0): img = Image.open(img_path) if size > 0: img = img.resize((size, size)) if num_channels == 3: img = img.convert("RGB") img_array = np.array(img) elif num_channels == 1: img = img.convert("L") img_array = np.array(img) img_array = np.expand_dims(img_array, axis=-1) else: raise ValueError("Wrong number of channels") self.original_pil_img = img # self.input_img = normalize_img(img_array) self.gt_img = self.create_ground_truth_img(img_array) self.do_batch = batch_size != 0 self.batch_size = batch_size self.x, self.y = image_array_to_xy(self.gt_img) self.create_batches() self.cursor = 0 @abstractmethod def create_ground_truth_img(self, img_array): pass def __iter__(self): return self def __next__(self): try: data = self.get(self.cursor) except IndexError: raise StopIteration self.cursor += 1 return data def __len__(self): return self.num_batches def create_batches(self): if not self.do_batch: self.batched_x, self.num_batches = split_to_batches(self.x, size=0) self.batched_y, self.num_batches = split_to_batches(self.y, size=0) else: shuffled_x, shuffled_y = shuffle_arrays_in_same_order([self.x, self.y]) self.batched_x, self.num_batches = split_to_batches( shuffled_x, size=self.batch_size ) # batch the shuffled copies self.batched_y, self.num_batches = split_to_batches( shuffled_y, size=self.batch_size ) def get(self, i): x = jnp.array(self.batched_x[i]) y = jnp.array(self.batched_y[i]) data = {"input": x, "output": y} return data @abstractmethod def get_ground_truth_image(self): pass class NormalImageLoader(BaseImageLoader): def create_ground_truth_img(self, img_array): return normalize_img(img_array) def get_ground_truth_image(self): img = unnormalize_img(self.gt_img) img = img.squeeze() return Image.fromarray(np.uint8(img)) class GradientImageLoader(BaseImageLoader): def create_ground_truth_img(self, img_array): img = normalize_img(img_array) return gradient(img * 1e1) def get_ground_truth_image(self): img = gradient_to_img(self.gt_img) img = img.squeeze() return Image.fromarray(np.uint8(img)) class LaplacianImageLoader(BaseImageLoader): def create_ground_truth_img(self, img_array): img = normalize_img(img_array) return laplace(img * 1e4) def get_ground_truth_image(self): img = laplacian_to_img(self.gt_img) return Image.fromarray(np.uint8(img)) class CombinedImageLoader: def __init__(self, img_path, num_channels, size=0, batch_size=0): img = Image.open(img_path) if size > 0: img = img.resize((size, size)) if num_channels != 1: raise ValueError("Only supports 1 channel for now") img = img.convert("L") img_array = np.array(img) img_array = np.expand_dims(img_array, axis=-1) self.original_pil_img = img self.gt_vanilla = normalize_img(img_array) self.gt_gradient = gradient(normalize_img(img_array)) self.gt_laplacian = laplace(normalize_img(img_array)) self.do_batch = batch_size != 0 self.batch_size = batch_size self.x, self.y_vanilla = image_array_to_xy(self.gt_vanilla) _, self.y_gradient = image_array_to_xy(self.gt_gradient) _, self.y_laplacian =
image_array_to_xy(self.gt_laplacian) self.create_batches() self.cursor = 0 def __iter__(self): return self def __next__(self): try: data = self.get(self.cursor) except IndexError: raise StopIteration self.cursor += 1 return data def create_batches(self): x, y_vani, y_grad, y_lapl = ( self.x, self.y_vanilla, self.y_gradient, self.y_laplacian, ) if self.do_batch: x, y_vani, y_grad, y_lapl = shuffle_arrays_in_same_order( [x, y_vani, y_grad, y_lapl] ) self.batched_x, self.num_batches = split_to_batches(x, size=self.batch_size) self.batched_y_vanilla, _ = split_to_batches(y_vani, size=self.batch_size) self.batched_y_gradient, _ = split_to_batches(y_grad, size=self.batch_size) self.batched_y_laplacian, _ = split_to_batches(y_lapl, size=self.batch_size) def get(self, i): x = jnp.array(self.batched_x[i]) y_vanilla = jnp.array(self.batched_y_vanilla[i]) y_gradient = jnp.array(self.batched_y_gradient[i]) y_laplacian = jnp.array(self.batched_y_laplacian[i]) data = { "input": x, "vanilla": y_vanilla, "gradient": y_gradient, "laplacian": y_laplacian, } return data def get_ground_truth_image(self): img = unnormalize_img(self.gt_vanilla) img = img.squeeze() return Image.fromarray(np.uint8(img)) def normalize_img(img_array): img_array = img_array / 255.0 return (img_array - 0.5) / 0.5 def unnormalize_img(img_array): return (img_array * 0.5 + 0.5) * 255 def convert_to_normalized_index(width, height): normalized_index = [] i = np.linspace(-1, 1, width) j = np.linspace(-1, 1, height) ii, jj = np.meshgrid(i, j, indexing="ij") normalized_index = np.stack([ii, jj], axis=-1) return np.reshape(normalized_index, (-1, 2)) def image_array_to_xy(img_array): width, height, channel = img_array.shape x = convert_to_normalized_index(width, height) num_channel = img_array.shape[-1] y = np.reshape(img_array, (-1, num_channel)) return x, np.array(y) def xy_to_image_array(x, y, width, height): w_idx = ((x[:, 0] + 1) / 2) * (width - 1) h_idx = ((x[:, 1] + 1) / 2) * (height - 1) w_idx =
np.around(w_idx)
numpy.around
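Shape check (my addition) for the xy conversion used by the loaders: an 8x8 single-channel image becomes 64 normalized (i, j) coordinates in [-1, 1] with one target value per pixel.

import numpy as np

img = np.random.rand(8, 8, 1).astype(np.float32)
x, y = image_array_to_xy(img)
print(x.shape, y.shape)   # (64, 2) (64, 1)
print(x.min(), x.max())   # -1.0 1.0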
import sys if "" not in sys.path: sys.path.append("") import os from glob import glob import warnings import numpy as np from PIL import Image from util.general import printProgressBar, print_result, print_warning def generate_paired_lists(cxr_paths, mask_paths, subset_n, split_masks=False): """ This function is used for a few purposes. Firstly, it checks whether every image has a corresponding mask. Secondly, it generates two sorted lists (with image pairs in the proper order). Finally, it also generates a list of filenames under which to store the preprocessed data. """ cxr_sort = [] mask_sort = [] subject_names = [] missing_masks = 0 for subject_n in range(len(cxr_paths)): # Find CXR filename and look for matches in the mask list cxr_filename = os.path.split(cxr_paths[subject_n])[-1] if not split_masks: filename_matches = [mask_path for mask_path in mask_paths if os.path.splitext(cxr_filename)[0] in mask_path] else: filename_matches0 = [mask_path for mask_path in mask_paths[0] if os.path.splitext(cxr_filename)[0] in mask_path] filename_matches1 = [mask_path for mask_path in mask_paths[1] if os.path.splitext(cxr_filename)[0] in mask_path] if len(filename_matches0) == len(filename_matches1) == 1: filename_matches = [[filename_matches0[0], filename_matches1[0]]] else: warnings.warn("Missing either an R or L mask. " "Omitting entire mask") filename_matches = [] if type(filename_matches) == list and len(filename_matches) == 1: cxr_sort.append(cxr_paths[subject_n]) subject_names.append("{:d}_{:03d}".format(subset_n, subject_n)) if not split_masks: mask_sort.append(filename_matches[0]) else: mask_sort.append([filename_matches[0][0], filename_matches[0][1]]) elif type(filename_matches) == list and len(filename_matches) > 1: warnings.warn("Multiple matches found for a single subject name!") elif type(filename_matches) == list and len(filename_matches) == 0: missing_masks += 1 else: raise ValueError("Parameter 'filename_matches' " "should return a list") return cxr_sort, mask_sort, subject_names, missing_masks def combine_masks(mask1_path, mask2_path): """ This function combines two masks into one. It is primarily used to combine the separate L/R masks of the Mntg dataset. """ mask1_img = Image.open(mask1_path) mask2_img = Image.open(mask2_path) mask1_array = np.asarray(mask1_img) mask2_array =
np.asarray(mask2_img)
numpy.asarray
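The body of combine_masks is cut off above; a plausible completion (my assumption, not the original code) is a pixelwise union of the left and right lung masks:

import numpy as np
from PIL import Image

def combine_masks_sketch(mask1_path, mask2_path):
    m1 = np.asarray(Image.open(mask1_path))
    m2 = np.asarray(Image.open(mask2_path))
    return Image.fromarray(np.maximum(m1, m2))   # union of the two binary masks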
import pandas as pd import seaborn as sns import json import matplotlib.pyplot as plt import sys from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from sklearn.preprocessing import StandardScaler, LabelEncoder from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn import model_selection from sklearn.exceptions import UndefinedMetricWarning import warnings import numpy as np import scipy as sp class CPUGPUComparison(): def __init__( self ): print('CPU GPU SpTRSV performance comparison\n') def DrawComparisonTable(self, filename): perf_dataset = pd.read_csv(filename) winner_df = perf_dataset.idxmin(axis=1) winner_counts = winner_df.value_counts() norm_winner_counts = winner_df.value_counts(normalize=True)*100 print(" ----------------------------------------------------------------------------------------------------") print(" |%15s%35s%32s%15s |" % ("Architecture |","SpTRSV implementation |","Winner for # of matrices |",\ "Percentage")) print(" ----------------------------------------------------------------------------------------------------") print(" |%15s%35s%30d%s%13.2f %% |" % ("CPU |","MKL(seq) |", winner_counts['mkl_seq']," |",norm_winner_counts['mkl_seq'])) print(" |%15s%35s%30d%s%13.2f %% |" % ("|","MKL(par) |", winner_counts['mkl_par']," |",norm_winner_counts['mkl_par'])) print(" ----------------------------------------------------------------------------------------------------") print(" |%15s%35s%30d%s%13.2f %% |" % ("GPU |","cuSPARSE(v1) |", winner_counts['cusparse_v1']," |",norm_winner_counts['cusparse_v1'])) print(" |%15s%35s%30d%s%13.2f %% |" % ("|","cuSPARSE(v2)(level-sch.) |", winner_counts['cusparse_v2_lvl']," |",norm_winner_counts['cusparse_v2_lvl'])) print(" |%15s%35s%30d%s%13.2f %% |" % ("|","cuSPARSE(v2)(no level sch.) 
|", winner_counts['cusparse_v2_nolvl']," |",norm_winner_counts['cusparse_v2_nolvl'])) print(" |%15s%35s%30d%s%13.2f %% |" % ("|","Sync-Free |", winner_counts['syncfree']," |",norm_winner_counts['syncfree'])) print(" ----------------------------------------------------------------------------------------------------") def DrawStatsTable(self, filename): stats_dataset = pd.read_csv(filename) ds_median = stats_dataset.median() ds_min = stats_dataset.min() ds_max = stats_dataset.max() min_rows = ds_min['rows']/1000 median_rows = ds_median['rows']/1000 max_rows = ds_max['rows']/1000000 min_nnzs = ds_min['nnzs']/1000 median_nnzs = ds_median['nnzs']/1000 max_nnzs = ds_max['nnzs']/1000000 print(' ---------------------------------------------------------------------') print(" |%20s%16s%16s%16s"%(" |","Minimum |", "Median |","Maximum |")) print(' ---------------------------------------------------------------------') print(" |%20s%13.2fK%s%13.2fK%s%13.2fM%s"%("Number of rows |",min_rows," |", median_rows," |",max_rows, " |")) print(' ---------------------------------------------------------------------') print(" |%20s%13.3fK%s%13.3fK%s%13.3fM%s"%("Number of nonzeros |",min_nnzs, " |",median_nnzs, " |", max_nnzs," |")) print(' ---------------------------------------------------------------------') def DrawFigure(self, filename): perf_data = pd.read_csv(filename) perf_data.to_json("temp.json", orient='records') with open("temp.json", "r") as filename: V100_Gold_dataset_json = json.load(filename) V100_Gold_json_formatted = [] for i in range(0, 37): V100_Gold_json_formatted.append({ "Platform 1": V100_Gold_dataset_json[i]["Platform"], "Matrix 1": V100_Gold_dataset_json[i]["Matrix ID"], "Execution Time 1": V100_Gold_dataset_json[i]["Execution Time"], "Degree of Parallelism 1":V100_Gold_dataset_json[i]["Degree of Parallelism"], "Winner 1":V100_Gold_dataset_json[i]["Winner"], "Platform 2": V100_Gold_dataset_json[i+37]["Platform"], "Matrix 2": V100_Gold_dataset_json[i+37]["Matrix ID"], "Execution Time 2": V100_Gold_dataset_json[i+37]["Execution Time"], "Degree of Parallelism 2":V100_Gold_dataset_json[i]["Degree of Parallelism"], "Winner 2": V100_Gold_dataset_json[i+37]["Winner"]}) V100_Gold_json_formatted = sorted(V100_Gold_json_formatted, key = lambda i: (i['Winner 1'], i['Degree of Parallelism 1'])) V100_Gold_json_sorted = [] V100_Gold_Matrix = [] for i in range(0, 37): V100_Gold_json_sorted.append({ "Platform": V100_Gold_json_formatted[i]["Platform 1"], "Matrix ID": V100_Gold_json_formatted[i]["Matrix 1"], "Degree of Parallelism": V100_Gold_json_formatted[i]["Degree of Parallelism 1"], "Execution Time": V100_Gold_json_formatted[i]["Execution Time 1"], }) V100_Gold_Matrix.append(V100_Gold_json_formatted[i]["Matrix 1"]) for i in range(0, 37): V100_Gold_json_sorted.append({ "Platform": V100_Gold_json_formatted[i]["Platform 2"], "Matrix ID": V100_Gold_json_formatted[i]["Matrix 2"], "Degree of Parallelism": V100_Gold_json_formatted[i]["Degree of Parallelism 2"], "Execution Time": V100_Gold_json_formatted[i]["Execution Time 2"], }) with open("temp2.json", "w") as file2: json.dump(V100_Gold_json_sorted, file2) V100_Gold = pd.read_json('temp2.json', orient='records') plt.figure(figsize=(15,5)) p1 = sns.barplot(x="Matrix ID",y="Execution Time",hue="Platform", data=V100_Gold,palette = "magma", edgecolor = 'w', order=V100_Gold_Matrix) sns.set(font_scale = 1.3) sns.set_style("white") p1.set_yscale("log") p1.set_xticklabels(p1.get_xticklabels(), rotation=90) ax1 = p1.axes ax1.set(xticklabels=V100_Gold["Degree of 
Parallelism"]) ax1.axvline(12.5, ls='--', lw=1.8) ax1.text(1.0, 200, "GPU winners: 24") ax1.text(1.0, 120, "CPU winners: 13") p1.set_xlabel("Matrix degree of parallelism (DoP)") p1.set_ylabel("Lower triangular solve time (msec)") legend = p1.legend() legend.texts[0].set_text("NVIDIA V100") legend.texts[1].set_text("Intel Gold") plt.legend(loc='upper right') plt.setp(ax1.xaxis.get_majorticklabels(), ha='center') fig1 = p1.get_figure() fig1.set_rasterized(True) fig1.savefig('./datasets/figure2.eps', bbox_inches='tight',rasterized=True) print("Figure 2 saved in datasets directory as figure2.eps") plt.show() class FeatureSelection(): def __init__( self ): print('Feature Selection\n') def PrintAllFeatures(self, filename): features = pd.read_csv(filename) for col in features.columns: print(col) def FeatureRanking(self, filename): features_data = pd.read_csv(filename) features = features_data.drop(['winner'], axis = 1) target = features_data['winner'] features=features[:-2] target=target[:-2] KBestFeatures = SelectKBest(score_func=chi2, k=30) fit = KBestFeatures.fit(features, target) rank = [i+1 for i in range(30)] rank_dict = {'Rank':rank} rank_df = pd.DataFrame(data=rank_dict) feature_dict = {'Feature':features.columns, 'Score':fit.scores_} feature_df = pd.DataFrame(data=feature_dict) desc = ['Number of rows', 'Number of non-zeros','Number of levels', \ 'Maximum row length count', 'Maximum column length count', "Minimum column length count", \ 'Minimum row length count', 'Maximum non-zeros per level row-wise', \ 'Maximum non-zeros per level column-wise', 'Maximum row length', \ 'Maximum column length', 'Mean row-length',\ 'Maximum rows per level','Median rows per level', \ 'Median row length', 'Median column length', \ 'Mean non-zeros per level row-wise', 'Standard deviation rows per level', \ 'Standard deviation non-zeros per level row-wise', 'Standard deviation rows length', \ 'Standard deviation column length','Mean rows per level', 'Mean max column length per level', \ 'Mean mean column length per level', 'Mean std. 
deviation column length per level', \ 'Mean maximum row length per level','Mean standard deviation row length per level',\ 'Mean mean row length per level','Mean minimum row length per level',\ 'Mean median row length per level'] feature_df['Description'] = desc feature_df_sorted = feature_df.nlargest(30, 'Score') feature_df_sorted.reset_index(drop=True,inplace=True) feature_df_sorted.index += 1 print(feature_df_sorted.to_string(index=True)) class Prediction(): def __init__( self ): print('Prediction\n') def CrossValidation(self, filename, mode): training_data = pd.read_csv(filename) if mode == 1: # Traning set for 10 features X = training_data.drop(['min_rl_cnt','mean_rpl','median_rpl','max_cl','lvls','std_rpl', \ 'mean_max_cl_pl','mean_mean_cl_pl','max_rl','mean_std_cl_pl','mean_max_rl_pl',\ 'std_cl','mean_std_rl_pl','mean_mean_rl_pl','mean_median_rl_pl','mean_min_rl_pl',\ 'mean_rl','median_rl','median_cl','std_rl','mkl_seq','mkl_par','cusparse_v1',\ 'cusparse_v2_lvl','cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner',\ '2nd','3rd','4th','5th','6th'], axis=1) else: # Traning set for 30 features X = training_data.drop(['mkl_seq','mkl_par','cusparse_v1','cusparse_v2_lvl', \ 'cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner','2nd',\ '3rd','4th','5th','6th'], axis=1) y = training_data['winner'] sc = StandardScaler() X_scaled = sc.fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25, random_state=44) rfc_algo_selection = RandomForestClassifier(n_estimators=300) rfc_algo_selection.fit(X_train, y_train) pred_rfc_algo_selection = rfc_algo_selection.predict(X_test) seed = 10 cv_results = [] accuracy = 'accuracy' precision = 'precision_weighted' recall = 'recall_weighted' f1_score = 'f1_weighted' test_precision = 'test_precision_weighted' test_recall = 'test_recall_weighted' test_f1 = 'test_f1_weighted' test_accuracy = 'test_accuracy' warnings.filterwarnings("ignore", category=UndefinedMetricWarning) scoring = [accuracy, precision, recall,f1_score] kfold = model_selection.KFold(n_splits=10, random_state=seed) with warnings.catch_warnings(): scores = model_selection.cross_validate(rfc_algo_selection, X_scaled, y, cv=kfold,scoring=scoring) cv_results.append(scores[test_accuracy]) cv_results.append(scores[test_precision]) cv_results.append(scores[test_recall]) cv_results.append(scores[test_f1]) print('Mean accuracy: %0.1f %%' % (cv_results[0].mean()*100.0)) print('Mean precision: %0.1f %%' % (cv_results[1].mean()*100.0)) print('Mean recall: %0.1f %%' % (cv_results[2].mean()*100.0)) print('Mean f1-score: %0.1f %%' % (cv_results[3].mean()*100.0)) print('Median accuracy: %0.1f %%' % (np.median(cv_results[0])*100.0)) print('Median precision: %0.1f %%' % (np.median(cv_results[1])*100.0)) print('Median recall: %0.1f %%' % (np.median(cv_results[2])*100.0)) print('Median f1-score: %0.1f %%\n' % (np.median(cv_results[3])*100.0)) labels = ['Accuracy', 'Precision', 'Recall', 'F1-score'] ax1 = sns.boxplot(y=cv_results,x=labels, showmeans=True, fliersize=1,meanprops={"marker":"D","markerfacecolor":"yellow", "markeredgecolor":"none"}) sns.set(font_scale=1.3) sns.set_style("white") vals = ax1.get_yticks() ax1.set_yticklabels(['{:,.0%}'.format(x) for x in vals]) myfigure = ax1.get_figure() if mode == 1: myfigure.savefig('./datasets/figure6.png',bbox_inches='tight') print("Figure 8 saved in datasets as figure8.eps") print("Note: Statistics can slightly vary from Figure 8 and from run-to-run") else: 
myfigure.savefig('./datasets/figure7.eps',bbox_inches='tight') myfigure.show() print("Figure 7 saved in datasets as figure7.eps") print("Note: Statistics can slightly vary from Figure 7 and from run-to-run") plt.show() class Performance(): def __init__( self ): print('Performance Results\n') def Speedup(self, filename): training_data = pd.read_csv(filename) X = training_data.drop(['mkl_seq','mkl_par','cusparse_v1','cusparse_v2_lvl', \ 'cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner','2nd',\ '3rd','4th','5th','6th'], axis=1) y = training_data['winner'] sc = StandardScaler() X_scaled = sc.fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25, random_state=44) rfc_algo_selection = RandomForestClassifier(n_estimators=300) rfc_algo_selection.fit(X_train, y_train) pred_rfc_algo_selection = rfc_algo_selection.predict(X_test) seed = 10 precision = 'precision_weighted' recall = 'recall_weighted' f1_score = 'f1_weighted' scoring = [precision, recall,f1_score] kfold = model_selection.KFold(n_splits=10) cross_validate_pred = model_selection.cross_val_predict(rfc_algo_selection, X_scaled, y, cv=kfold) MKL_seq = training_data['mkl_seq'] MKL_par = training_data['mkl_par'] cus1 = training_data['cusparse_v1'] cus2_lvl = training_data['cusparse_v2_lvl'] cus2_nolvl = training_data['cusparse_v2_nolvl'] syncfree = training_data['syncfree'] algo_labels = {0:'MKL(seq)', 1:'MKL(par)', 2:'cuSPARSE(v1)', \ 3:'cuSPARSE(v2)(level-sch.)',4:'cuSPARSE(v2)(no level-sch.)',5:'Sync-Free'} Gain_vs_MKL_seq = [] Gain_vs_MKL_par = [] Gain_vs_cus1 = [] Gain_vs_cus2_lvl = [] Gain_vs_cus2_nolvl = [] Gain_vs_syncfree = [] i = 0 for val in cross_validate_pred: if val == 1: predicted_time = MKL_seq[i] if val == 2: predicted_time = MKL_par[i] if val == 3: predicted_time = cus1[i] if val == 4: predicted_time = cus2_lvl[i] if val == 5: predicted_time = cus2_nolvl[i] if val == 6: predicted_time = syncfree[i] Gain_vs_MKL_seq.append(MKL_seq[i]/predicted_time) Gain_vs_MKL_par.append(MKL_par[i]/predicted_time) Gain_vs_cus1.append(cus1[i]/predicted_time) Gain_vs_cus2_lvl.append(cus2_lvl[i]/predicted_time) Gain_vs_cus2_nolvl.append(cus2_nolvl[i]/predicted_time) Gain_vs_syncfree.append(syncfree[i]/predicted_time) i = i + 1 predicted_speedup=[] predicted_speedup.append(Gain_vs_MKL_seq) predicted_speedup.append(Gain_vs_MKL_par) predicted_speedup.append(Gain_vs_cus1) predicted_speedup.append(Gain_vs_cus2_lvl) predicted_speedup.append(Gain_vs_cus2_nolvl) predicted_speedup.append(Gain_vs_syncfree) speedup_g2 = [] speedup_l1 = [] counter = 0 counter_l = 0 counter_l95 = 0 for i in range(6): for x in predicted_speedup[i]: if x >= 1: counter = counter + 1 if x < 1: counter_l = counter_l + 1 if x < 0.95: counter_l95 = counter_l95 + 1 speedup_g2.append(counter/998*100) speedup_l1.append(counter_l/998*100) counter = 0 counter_l = 0 counter_l95 = 0 sns.set(font_scale=1.0) sns.set_style("white") fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(10, 4.5)) fig.set_rasterized(True) k = 0 for i in range(2): for j in range(3): #my_bins = [0,1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,int(np.max(predicted_speedup[k]))] max_ps = np.max(predicted_speedup[k]) my_bins = np.arange(0, 75) clrs=['#CB4335' if (x < 1) else '#2874A6' for x in my_bins] plot = sns.distplot(predicted_speedup[k], \ bins=my_bins, ax=ax[i][j],kde=False) sns.color_palette("husl", 8) ax1 = plot.axes for rec, clr in zip(ax1.patches, clrs): rec.set_color(clr) props = dict(boxstyle='round', facecolor='none', alpha=0.5) ax1.text(0.55, 0.70, ">=1: 
%.1f%%"%(speedup_g2[k]), transform=ax1.transAxes, fontsize=12, verticalalignment='top', bbox=props) ax1.text(0.55, 0.85, "Mean: %.1f"%(sp.stats.hmean(predicted_speedup[k])), transform=ax1.transAxes, fontsize=12, verticalalignment='top', bbox=props) z_critical = sp.stats.norm.ppf(q = 0.95) # Get the z-critical value* pop_stdev = np.std(predicted_speedup[k]) hmean = sp.stats.hmean(predicted_speedup[k]) mean_m_x = [(hmean-x) for x in predicted_speedup] mean_m_x = [np.sqrt(x*x) for x in mean_m_x] sample_size = len(predicted_speedup[k]) h_std = np.sum(mean_m_x)/sample_size margin_of_error = z_critical * (pop_stdev/np.sqrt(sample_size)) plot.set_yscale("log") #if k >= 3: plot.set_xlabel("Speedup") plot.set_title(algo_labels[k],loc="left") if k == 0 or k == 3: plot.set_ylabel('Number of matrices') k = k + 1 plt.tight_layout() warnings.filterwarnings("ignore") with warnings.catch_warnings(): fig.savefig('./datasets/figure9.pdf',bbox_inches='tight',rasterized=True) print("Figure 9 saved in datasets as figure9.eps") print("Note: Statistics can slightly vary from Figure 9 and from run-to-run") #plt.show() def Overheads(self, filename_training, filename_overhead): training_data=pd.read_csv(filename_training) overhead_data=pd.read_csv(filename_overhead) FE_wo_ilu = overhead_data['FE_oh_wo'] # Feature extraction (FE) overhead without ILU factorization time included FE_w_ilu = overhead_data['FE_oh_w'] # Feature extraction (FE) ovheread with ILU factorization time included m=overhead_data['m'] # Number of rows MKL_seq = training_data['mkl_seq'] MKL_par = training_data['mkl_par'] cus1 = training_data['cusparse_v1'] cus2_lvl = training_data['cusparse_v2_lvl'] cus2_nolvl = training_data['cusparse_v2_nolvl'] syncfree = training_data['syncfree'] seed = 250 precision = 'precision_weighted' recall = 'recall_weighted' f1_score = 'f1_weighted' scoring = [precision, recall,f1_score] X = training_data.drop(['mkl_seq','mkl_par','cusparse_v1','cusparse_v2_lvl','cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner','2nd','3rd','4th','5th','6th'], axis=1) y = training_data['winner'] sc = StandardScaler() X_scaled = sc.fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25, random_state=44) rfc_algo_selection = RandomForestClassifier(n_estimators=300) rfc_algo_selection.fit(X_train, y_train) kfold = model_selection.KFold(n_splits=10) cross_validate_pred = model_selection.cross_val_predict(rfc_algo_selection, X_scaled, y, cv=kfold) L_calls_vs_FE_wo_100K = [] # FE overhead in terms of lower triangular solve iterations without ILU factorization time included for matrices upto 100K rows L_calls_vs_FE_w_100K = [] # FE overhead in terms of lower triangular solve iterations with ILU factorization time included for matrices upto 100K rows L_calls_vs_FE_wo_1000K = [] # FE overhead in terms of lower triangular solve iterations without ILU factorization time included for matrices from 100K-1000K rows L_calls_vs_FE_w_1000K = [] # FE overhead in terms of lower triangular solve iterations with ILU factorization time included for matrices from 100K-1000K rows L_calls_vs_FE_wo_g1000K = [] # FE overhead in terms of lower triangular solve iterations without ILU factorization time included for matrices > 1000K rows L_calls_vs_FE_w_g1000K = [] # FE overhead in terms of lower triangular solve iterations with ILU factorization time included for matrices > 1000K rows oh_FE_wo_100K = [] # FE overhead without ILU factorization time included for matrices upto 100K oh_FE_w_100K = [] # FE 
# overhead with ILU factorization time included for matrices up to 100K

# Per-method overhead sample lists (one entry per matrix).
# Naming scheme: oh_<method>_<wo|w>_<bucket>[_ana]
#   wo / w : ILU factorization time excluded / included
#   bucket : 100K (n < 100K), 1000K (100K <= n < 1000K), g1000K (n >= 1000K)
#   _ana   : algorithm-analysis overhead only, i.e. without the 10-iteration solve time
# Methods: MKLs = MKL(seq), MKLp = MKL(par), CUS1 = cuSPARSE(v1),
#          CUS2lvl = cuSPARSE(v2) with level-scheduling,
#          CUS2nolvl = cuSPARSE(v2) without level-scheduling, SyncFree.
oh_FE_wo_1000K = []
oh_FE_w_1000K = []
oh_FE_wo_g1000K = []
oh_FE_w_g1000K = []

oh_MKLs_wo_100K = []
oh_MKLs_w_100K = []
oh_MKLp_wo_100K = []
oh_MKLp_w_100K = []
oh_CUS1_wo_100K = []
oh_CUS1_w_100K = []
oh_CUS2lvl_wo_100K = []
oh_CUS2lvl_w_100K = []
oh_CUS2nolvl_wo_100K = []
oh_CUS2nolvl_w_100K = []
oh_SyncFree_wo_100K = []
oh_SyncFree_w_100K = []

oh_MKLs_wo_1000K = []
oh_MKLs_w_1000K = []
oh_MKLp_wo_1000K = []
oh_MKLp_w_1000K = []
oh_CUS1_wo_1000K = []
oh_CUS1_w_1000K = []
oh_CUS2lvl_wo_1000K = []
oh_CUS2lvl_w_1000K = []
oh_CUS2nolvl_wo_1000K = []
oh_CUS2nolvl_w_1000K = []
oh_SyncFree_wo_1000K = []
oh_SyncFree_w_1000K = []

oh_MKLs_wo_g1000K = []
oh_MKLs_w_g1000K = []
oh_MKLp_wo_g1000K = []
oh_MKLp_w_g1000K = []
oh_CUS1_wo_g1000K = []
oh_CUS1_w_g1000K = []
oh_CUS2lvl_wo_g1000K = []
oh_CUS2lvl_w_g1000K = []
oh_CUS2nolvl_wo_g1000K = []
oh_CUS2nolvl_w_g1000K = []
oh_SyncFree_wo_g1000K = []
oh_SyncFree_w_g1000K = []

oh_MKLs_wo_100K_ana = []
oh_MKLs_w_100K_ana = []
oh_MKLp_wo_100K_ana = []
oh_MKLp_w_100K_ana = []
oh_CUS1_wo_100K_ana = []
oh_CUS1_w_100K_ana = []
oh_CUS2lvl_wo_100K_ana = []
oh_CUS2lvl_w_100K_ana = []
oh_CUS2nolvl_wo_100K_ana = []
oh_CUS2nolvl_w_100K_ana = []
oh_SyncFree_wo_100K_ana = []
oh_SyncFree_w_100K_ana = []

oh_MKLs_wo_1000K_ana = []
oh_MKLs_w_1000K_ana = []
oh_MKLp_wo_1000K_ana = []
oh_MKLp_w_1000K_ana = []
oh_CUS1_wo_1000K_ana = []
oh_CUS1_w_1000K_ana = []
oh_CUS2lvl_wo_1000K_ana = []
oh_CUS2lvl_w_1000K_ana = []
oh_CUS2nolvl_wo_1000K_ana = []
oh_CUS2nolvl_w_1000K_ana = []
oh_SyncFree_wo_1000K_ana = []
oh_SyncFree_w_1000K_ana = []

oh_MKLs_wo_g1000K_ana = []
oh_MKLs_w_g1000K_ana = []
oh_MKLp_wo_g1000K_ana = []
oh_MKLp_w_g1000K_ana = []
oh_CUS1_wo_g1000K_ana = []
oh_CUS1_w_g1000K_ana = []
oh_CUS2lvl_wo_g1000K_ana = []
oh_CUS2lvl_w_g1000K_ana = []
oh_CUS2nolvl_wo_g1000K_ana = []
oh_CUS2nolvl_w_g1000K_ana = []
oh_SyncFree_wo_g1000K_ana = []
oh_SyncFree_w_g1000K_ana = []
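# The block of near-identical lists above could equally be held in one keyed
# container. A minimal sketch only (the original script keeps the flat
# variables, which the aggregation code below indexes directly; nothing here
# is used by that code):
from collections import defaultdict

SOLVER_METHODS = ['MKLs', 'MKLp', 'CUS1', 'CUS2lvl', 'CUS2nolvl', 'SyncFree']
SIZE_BUCKETS = ['100K', '1000K', 'g1000K']

# overhead_samples[(method, ilu_included, bucket, analysis_only)] -> list of times
overhead_samples = defaultdict(list)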
# Empirical execution overhead per size bucket, with and without ILU
# factorization time included.
emp_oh_wo_100K = 0
emp_oh_wo_1000K = 0
emp_oh_wo_g1000K = 0
emp_oh_w_100K = 0
emp_oh_w_1000K = 0
emp_oh_w_g1000K = 0

i = 0
for val in cross_validate_pred:
    # Map the classifier's predicted label to that method's measured solve time.
    if val == 1:
        predicted_time = MKL_seq[i]
    if val == 2:
        predicted_time = MKL_par[i]
    if val == 3:
        predicted_time = cus1[i]
    if val == 4:
        predicted_time = cus2_lvl[i]
    if val == 5:
        predicted_time = cus2_nolvl[i]
    if val == 6:
        predicted_time = syncfree[i]

    if m[i] < 100000:
        L_calls_vs_FE_wo_100K.append(FE_wo_ilu[i] * 1000 / predicted_time)
        L_calls_vs_FE_w_100K.append(FE_w_ilu[i] * 1000 / predicted_time)
        oh_MKLs_wo_100K.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) 10 iter'][i])
        oh_MKLs_w_100K.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) 10 iter'][i]
                              + overhead_data['MKL(seq) ilu'][i])
        oh_MKLp_wo_100K.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) 10 iter'][i])
        oh_MKLp_w_100K.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) 10 iter'][i]
                              + overhead_data['MKL(par) ilu'][i])
        oh_CUS1_wo_100K.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) 10 iter'][i])
        oh_CUS1_w_100K.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) 10 iter'][i]
                              + overhead_data['cuSPARSE(v1) ilu'][i])
        oh_CUS2lvl_wo_100K.append(overhead_data['cusparse(v2)ana'][i] + overhead_data['cuSPARSE(v2)lvl'][i])
        oh_CUS2lvl_w_100K.append(overhead_data['cusparse(v2)ana'][i] + overhead_data['cuSPARSE(v2)lvl'][i]
                                 + overhead_data['cuSPARSE(v2)iluAna'][i] + overhead_data['cuSPARSE(v2)iu'][i])
        oh_CUS2nolvl_wo_100K.append(overhead_data['cuSPARSE(v2)nolvl 10 iter'][i])
        oh_CUS2nolvl_w_100K.append(overhead_data['cuSPARSE(v2)nolvl 10 iter'][i])
        oh_SyncFree_wo_100K.append(overhead_data['Sync-Free ana'][i] + overhead_data['Sync-Free 10 iter'][i])
        oh_SyncFree_w_100K.append(overhead_data['SycnFree_LU'][i] + overhead_data['Sync-Free ana'][i]
                                  + overhead_data['Sync-Free 10 iter'][i])
        oh_FE_wo_100K.append(overhead_data['FE_oh_wo'][i])
        oh_FE_w_100K.append(overhead_data['FE_oh_w'][i])
        oh_MKLs_wo_100K_ana.append(overhead_data['MKL(seq) Ana'][i])
        oh_MKLs_w_100K_ana.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) ilu'][i])
        oh_MKLp_wo_100K_ana.append(overhead_data['MKL(par) Ana'][i])
        oh_MKLp_w_100K_ana.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) ilu'][i])
        oh_CUS1_wo_100K_ana.append(overhead_data['cuSPARSE(v1) ana'][i])
        oh_CUS1_w_100K_ana.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) ilu'][i])
        oh_CUS2lvl_wo_100K_ana.append(overhead_data['cusparse(v2)ana'][i])
        oh_CUS2lvl_w_100K_ana.append(overhead_data['cusparse(v2)ana'][i]
                                     + overhead_data['cuSPARSE(v2)iluAna'][i] + overhead_data['cuSPARSE(v2)iu'][i])
        oh_CUS2nolvl_wo_100K_ana.append(0)
        oh_CUS2nolvl_w_100K_ana.append(0)
        oh_SyncFree_wo_100K_ana.append(overhead_data['Sync-Free ana'][i])
        oh_SyncFree_w_100K_ana.append(overhead_data['SycnFree_LU'][i] + overhead_data['Sync-Free ana'][i])

    if m[i] >= 100000 and m[i] < 1000000:
        L_calls_vs_FE_wo_1000K.append(FE_wo_ilu[i] * 1000 / predicted_time)
        L_calls_vs_FE_w_1000K.append(FE_w_ilu[i] * 1000 / predicted_time)
        oh_MKLs_wo_1000K.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) 10 iter'][i])
        oh_MKLs_w_1000K.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) 10 iter'][i]
                               + overhead_data['MKL(seq) ilu'][i])
        oh_MKLp_wo_1000K.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) 10 iter'][i])
        oh_MKLp_w_1000K.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) 10 iter'][i]
                               + overhead_data['MKL(par) ilu'][i])
        oh_CUS1_wo_1000K.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) 10 iter'][i])
        oh_CUS1_w_1000K.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) 10 iter'][i]
                               + overhead_data['cuSPARSE(v1) ilu'][i])
        oh_CUS2lvl_wo_1000K.append(overhead_data['cusparse(v2)ana'][i] + overhead_data['cuSPARSE(v2)lvl'][i])
        oh_CUS2lvl_w_1000K.append(overhead_data['cusparse(v2)ana'][i] + overhead_data['cuSPARSE(v2)lvl'][i]
                                  + overhead_data['cuSPARSE(v2)iluAna'][i] + overhead_data['cuSPARSE(v2)iu'][i])
        oh_CUS2nolvl_wo_1000K.append(overhead_data['cuSPARSE(v2)nolvl 10 iter'][i])
        oh_CUS2nolvl_w_1000K.append(overhead_data['cuSPARSE(v2)nolvl 10 iter'][i])
        oh_SyncFree_wo_1000K.append(overhead_data['Sync-Free ana'][i] + overhead_data['Sync-Free 10 iter'][i])
        oh_SyncFree_w_1000K.append(overhead_data['SycnFree_LU'][i] + overhead_data['Sync-Free ana'][i]
                                   + overhead_data['Sync-Free 10 iter'][i])
        oh_FE_wo_1000K.append(overhead_data['FE_oh_wo'][i])
        oh_FE_w_1000K.append(overhead_data['FE_oh_w'][i])
        oh_MKLs_wo_1000K_ana.append(overhead_data['MKL(seq) Ana'][i])
        oh_MKLs_w_1000K_ana.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) ilu'][i])
        oh_MKLp_wo_1000K_ana.append(overhead_data['MKL(par) Ana'][i])
        oh_MKLp_w_1000K_ana.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) ilu'][i])
        oh_CUS1_wo_1000K_ana.append(overhead_data['cuSPARSE(v1) ana'][i])
        oh_CUS1_w_1000K_ana.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) ilu'][i])
        oh_CUS2lvl_wo_1000K_ana.append(overhead_data['cusparse(v2)ana'][i])
        oh_CUS2lvl_w_1000K_ana.append(overhead_data['cusparse(v2)ana'][i]
                                      + overhead_data['cuSPARSE(v2)iluAna'][i]
                                      + overhead_data['cuSPARSE(v2)iu'][i])
        oh_CUS2nolvl_wo_1000K_ana.append(0)
        oh_CUS2nolvl_w_1000K_ana.append(0)
        oh_SyncFree_wo_1000K_ana.append(overhead_data['Sync-Free ana'][i])
        oh_SyncFree_w_1000K_ana.append(overhead_data['SycnFree_LU'][i] + overhead_data['Sync-Free ana'][i])
        # emp_oh_wo_1000K.append(oh_MKLs_wo_1000K[i] + oh_MKLp_wo_1000K[i] + oh_CUS1_wo_1000K[i]
        #                        + oh_CUS2lvl_wo_1000K[i] + oh_CUS2nolvl_wo_1000K[i] + oh_SyncFree_wo_1000K[i])

    if m[i] >= 1000000:
        L_calls_vs_FE_wo_g1000K.append(FE_wo_ilu[i] * 1000 / predicted_time)
        L_calls_vs_FE_w_g1000K.append(FE_w_ilu[i] * 1000 / predicted_time)
        oh_MKLs_wo_g1000K.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) 10 iter'][i])
        oh_MKLs_w_g1000K.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) 10 iter'][i]
                                + overhead_data['MKL(seq) ilu'][i])
        oh_MKLp_wo_g1000K.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) 10 iter'][i])
        oh_MKLp_w_g1000K.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) 10 iter'][i]
                                + overhead_data['MKL(par) ilu'][i])
        oh_CUS1_wo_g1000K.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) 10 iter'][i])
        oh_CUS1_w_g1000K.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) ilu'][i]
                                + overhead_data['cuSPARSE(v1) 10 iter'][i])
        oh_CUS2lvl_wo_g1000K.append(overhead_data['cusparse(v2)ana'][i] + overhead_data['cuSPARSE(v2)lvl'][i])
        oh_CUS2lvl_w_g1000K.append(overhead_data['cusparse(v2)ana'][i] + overhead_data['cuSPARSE(v2)lvl'][i]
                                   + overhead_data['cuSPARSE(v2)iluAna'][i] + overhead_data['cuSPARSE(v2)iu'][i])
        oh_CUS2nolvl_wo_g1000K.append(overhead_data['cuSPARSE(v2)nolvl 10 iter'][i])
        oh_CUS2nolvl_w_g1000K.append(overhead_data['cuSPARSE(v2)nolvl 10 iter'][i])
        oh_SyncFree_wo_g1000K.append(overhead_data['Sync-Free ana'][i] + overhead_data['Sync-Free 10 iter'][i])
        oh_SyncFree_w_g1000K.append(overhead_data['SycnFree_LU'][i] + overhead_data['Sync-Free ana'][i]
                                    + overhead_data['Sync-Free 10 iter'][i])
        oh_FE_wo_g1000K.append(overhead_data['FE_oh_wo'][i])
        oh_FE_w_g1000K.append(overhead_data['FE_oh_w'][i])
        oh_MKLs_wo_g1000K_ana.append(overhead_data['MKL(seq) Ana'][i])
        oh_MKLs_w_g1000K_ana.append(overhead_data['MKL(seq) Ana'][i] + overhead_data['MKL(seq) ilu'][i])
        oh_MKLp_wo_g1000K_ana.append(overhead_data['MKL(par) Ana'][i])
        oh_MKLp_w_g1000K_ana.append(overhead_data['MKL(par) Ana'][i] + overhead_data['MKL(par) ilu'][i])
        oh_CUS1_wo_g1000K_ana.append(overhead_data['cuSPARSE(v1) ana'][i])
        oh_CUS1_w_g1000K_ana.append(overhead_data['cuSPARSE(v1) ana'][i] + overhead_data['cuSPARSE(v1) ilu'][i])
        oh_CUS2lvl_wo_g1000K_ana.append(overhead_data['cusparse(v2)ana'][i])
        oh_CUS2lvl_w_g1000K_ana.append(overhead_data['cusparse(v2)ana'][i]
                                       + overhead_data['cuSPARSE(v2)iluAna'][i]
                                       + overhead_data['cuSPARSE(v2)iu'][i])
        oh_CUS2nolvl_wo_g1000K_ana.append(0)
        oh_CUS2nolvl_w_g1000K_ana.append(0)
        oh_SyncFree_wo_g1000K_ana.append(overhead_data['Sync-Free ana'][i])
        oh_SyncFree_w_g1000K_ana.append(overhead_data['SycnFree_LU'][i] + overhead_data['Sync-Free ana'][i])
        # emp_oh_wo_g1000K.append(oh_MKLs_wo_g1000K[i] + oh_MKLp_wo_g1000K[i] + oh_CUS1_wo_g1000K[i]
        #                         + oh_CUS2lvl_wo_g1000K[i] + oh_CUS2nolvl_wo_g1000K[i] + oh_SyncFree_wo_g1000K[i])

    i = i + 1

# Average the per-matrix overheads for each size bucket.
emp_oh_wo_100K = (np.sum(oh_MKLs_wo_100K) + np.sum(oh_MKLp_wo_100K) + np.sum(oh_CUS1_wo_100K)
                  + np.sum(oh_CUS2lvl_wo_100K) + np.sum(oh_CUS2nolvl_wo_100K)
                  + np.sum(oh_SyncFree_wo_100K)) / (len(oh_MKLs_wo_100K) * 1000)
emp_oh_wo_1000K = (np.sum(oh_MKLs_wo_1000K) + np.sum(oh_MKLp_wo_1000K) + np.sum(oh_CUS1_wo_1000K)
                   + np.sum(oh_CUS2lvl_wo_1000K) + np.sum(oh_CUS2nolvl_wo_1000K)
                   + np.sum(oh_SyncFree_wo_1000K)) / (len(oh_MKLs_wo_1000K) * 1000)
emp_oh_wo_g1000K = (np.sum(oh_MKLs_wo_g1000K) + np.sum(oh_MKLp_wo_g1000K) + np.sum(oh_CUS1_wo_g1000K)
                    + np.sum(oh_CUS2lvl_wo_g1000K) + np.sum(oh_CUS2nolvl_wo_g1000K)
                    + np.sum(oh_SyncFree_wo_g1000K)) / (len(oh_MKLs_wo_g1000K) * 1000)
emp_oh_w_100K = (np.sum(oh_MKLs_w_100K) + np.sum(oh_MKLp_w_100K) + np.sum(oh_CUS1_w_100K)
                 + np.sum(oh_CUS2lvl_w_100K) + np.sum(oh_CUS2nolvl_w_100K)
                 + np.sum(oh_SyncFree_w_100K)) / (len(oh_MKLs_w_100K) * 1000)
emp_oh_w_1000K = (np.sum(oh_MKLs_w_1000K) + np.sum(oh_MKLp_w_1000K) + np.sum(oh_CUS1_w_1000K)
                  + np.sum(oh_CUS2lvl_w_1000K) + np.sum(oh_CUS2nolvl_w_1000K)
                  + np.sum(oh_SyncFree_w_1000K)) / (len(oh_MKLs_w_1000K) * 1000)
emp_oh_w_g1000K = (np.sum(oh_MKLs_w_g1000K) + np.sum(oh_MKLp_w_g1000K) + np.sum(oh_CUS1_w_g1000K)
                   + np.sum(oh_CUS2lvl_w_g1000K) + np.sum(oh_CUS2nolvl_w_g1000K)
                   + np.sum(oh_SyncFree_w_g1000K)) / (len(oh_MKLs_w_g1000K) * 1000)
emp_oh_wo_g1000K_ana = (np.sum(oh_MKLs_wo_g1000K_ana) + np.sum(oh_MKLp_wo_g1000K_ana)
                        + np.sum(oh_CUS1_wo_g1000K_ana) + np.sum(oh_CUS2lvl_wo_g1000K_ana)
                        + np.sum(oh_CUS2nolvl_wo_g1000K_ana)
                        + np.sum(oh_SyncFree_wo_g1000K_ana)) / (len(oh_MKLs_wo_g1000K_ana) * 1000)
emp_oh_w_g1000K_ana = (np.sum(oh_MKLs_w_g1000K_ana) + np.sum(oh_MKLp_w_g1000K_ana)
                       + np.sum(oh_CUS1_w_g1000K_ana) + np.sum(oh_CUS2lvl_w_g1000K_ana)
                       + np.sum(oh_CUS2nolvl_w_g1000K_ana)
                       + np.sum(oh_SyncFree_w_g1000K_ana)) / (len(oh_MKLs_w_g1000K_ana) * 1000)

# Bar-chart tuples: (FE, empirical, MKL(seq), MKL(par), cuSPARSE(v1),
# cuSPARSE(v2) level-sched., cuSPARSE(v2) no level-sched., SyncFree).
Overhead_wo_100K_bar = (np.sum(oh_FE_wo_100K) / len(oh_FE_wo_100K), emp_oh_wo_100K,
                        np.sum(oh_MKLs_wo_100K_ana) / (len(oh_MKLs_wo_100K_ana) * 1000),
                        np.sum(oh_MKLp_wo_100K_ana) / (len(oh_MKLp_wo_100K_ana) * 1000),
                        np.sum(oh_CUS1_wo_100K_ana) / (len(oh_CUS1_wo_100K_ana) * 1000),
                        np.sum(oh_CUS2lvl_wo_100K_ana) / (len(oh_CUS2lvl_wo_100K_ana) * 1000),
                        np.sum(oh_CUS2nolvl_wo_100K_ana) / (len(oh_CUS2nolvl_wo_100K_ana) * 1000),
                        np.sum(oh_SyncFree_wo_100K_ana) / (len(oh_SyncFree_wo_100K_ana) * 1000))
Overhead_w_100K_bar = (np.sum(oh_FE_w_100K) / len(oh_FE_w_100K), emp_oh_w_100K,
                       np.sum(oh_MKLs_w_100K_ana) / (len(oh_MKLs_w_100K_ana) * 1000),
                       np.sum(oh_MKLp_w_100K_ana) / (len(oh_MKLp_w_100K_ana) * 1000),
                       np.sum(oh_CUS1_w_100K_ana) / (len(oh_CUS1_w_100K_ana) * 1000),
                       np.sum(oh_CUS2lvl_w_100K_ana) / (len(oh_CUS2lvl_w_100K_ana) * 1000),
                       np.sum(oh_CUS2nolvl_w_100K_ana) / (len(oh_CUS2nolvl_w_100K_ana) * 1000),
                       np.sum(oh_SyncFree_w_100K_ana) / (len(oh_SyncFree_w_100K_ana) * 1000))
Overhead_wo_1000K_bar = (np.sum(oh_FE_wo_1000K) / len(oh_FE_wo_1000K), emp_oh_wo_1000K,
                         np.sum(oh_MKLs_wo_1000K_ana) / (len(oh_MKLs_wo_1000K_ana) * 1000),
                         np.sum(oh_MKLp_wo_1000K_ana) / (len(oh_MKLp_wo_1000K_ana) * 1000),
                         np.sum(oh_CUS1_wo_1000K_ana) / (len(oh_CUS1_wo_1000K_ana) * 1000),
                         np.sum(oh_CUS2lvl_wo_1000K_ana) / (len(oh_CUS2lvl_wo_1000K_ana) * 1000),
                         np.sum(oh_CUS2nolvl_wo_1000K_ana) / (len(oh_CUS2nolvl_wo_1000K_ana) * 1000),
                         np.sum(oh_SyncFree_wo_1000K_ana) / (len(oh_SyncFree_wo_1000K_ana) * 1000))
Overhead_w_1000K_bar = (np.sum(oh_FE_w_1000K) / len(oh_FE_w_1000K), emp_oh_w_1000K,
                        np.sum(oh_MKLs_w_1000K_ana) / (len(oh_MKLs_w_1000K_ana) * 1000),
                        np.sum(oh_MKLp_w_1000K_ana) / (len(oh_MKLp_w_1000K_ana) * 1000),
                        np.sum(oh_CUS1_w_1000K_ana) / (len(oh_CUS1_w_1000K_ana) * 1000),
                        np.sum(oh_CUS2lvl_w_1000K_ana) / (len(oh_CUS2lvl_w_1000K_ana) * 1000),
                        np.sum(oh_CUS2nolvl_w_1000K_ana) / (len(oh_CUS2nolvl_w_1000K_ana) * 1000),
                        np.sum(oh_SyncFree_w_1000K_ana) / (len(oh_SyncFree_w_1000K_ana) * 1000))
Overhead_wo_g1000K_bar = (np.sum(oh_FE_wo_g1000K) / len(oh_FE_wo_g1000K), emp_oh_wo_g1000K,
                          np.sum(oh_MKLs_wo_g1000K_ana) / (len(oh_MKLs_wo_g1000K_ana) * 1000),
                          np.sum(oh_MKLp_wo_g1000K_ana) / (len(oh_MKLp_wo_g1000K_ana) * 1000),
                          np.sum(oh_CUS1_wo_g1000K_ana) / (len(oh_CUS1_wo_g1000K_ana) * 1000),
                          np.sum(oh_CUS2lvl_wo_g1000K_ana) / (len(oh_CUS2lvl_wo_g1000K_ana) * 1000),
                          np.sum(oh_CUS2nolvl_wo_g1000K_ana) / (len(oh_CUS2nolvl_wo_g1000K_ana) * 1000),
                          np.sum(oh_SyncFree_wo_g1000K_ana) / (len(oh_SyncFree_wo_g1000K_ana) * 1000))
Overhead_w_g1000K_bar = (np.sum(oh_FE_w_g1000K) / len(oh_FE_w_g1000K), emp_oh_w_g1000K,
                         np.sum(oh_MKLs_w_g1000K_ana) / (len(oh_MKLs_w_g1000K_ana) * 1000),
np.sum(oh_MKLp_w_g1000K_ana)
numpy.sum
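The label-to-method dispatch inside the loop above is a fixed mapping, so it can also be written as a table lookup. A minimal sketch, assuming the same six timing arrays and the label encoding used by cross_validate_pred (1 = MKL(seq) ... 6 = SyncFree); predicted_time_for is an illustrative helper, not part of the original script:

timing_by_label = {
    1: MKL_seq, 2: MKL_par, 3: cus1,
    4: cus2_lvl, 5: cus2_nolvl, 6: syncfree,
}

def predicted_time_for(label, i):
    # Solve time of the method the classifier predicted for matrix i.
    return timing_by_label[label][i]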
from bin import Parameter
import numpy as np
from numpy.random import choice, random
import os
from bin.Util import normalization
from sklearn.ensemble import RandomForestRegressor


class Agent:
    def __init__(self, PnumActions, epsilon, inputDim, algorithm, Parrallel):
        self.projectionFunction = None
        self.loss = []
        self.Actions = list(
np.arange(0, PnumActions, 1, np.int)
numpy.arange
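The epsilon argument and the action list built above suggest an epsilon-greedy policy. A minimal sketch of such a selection rule using the choice/random imports from this snippet; q_values (per-action value estimates) and the function itself are illustrative assumptions, not part of the original class:

def select_action(actions, q_values, epsilon):
    # Explore uniformly with probability epsilon, otherwise exploit
    # the action with the highest estimated value.
    if random() < epsilon:
        return choice(actions)
    return actions[int(np.argmax(q_values))]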
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp  # noqa E402


####################################################################################
def factors(n):
    return set(reduce(list.__add__,
                      ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))


####################################################################################
def test_seed():
    np.random.seed(1)


####################################################################################
def test_abs():
    randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
    assert NumCpp.absScaler(randValue) == np.abs(randValue)

    components = np.random.randint(-100, -1, [2, ]).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
        1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))


####################################################################################
def test_add():
    # real array + real array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArray(shape)
    cArray2 = NumCpp.NdArray(shape)
    data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
    data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)

    # real array + real scaler
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    value = np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(cArray, value), data + value)

    # real scaler + real array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    value = np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(value, cArray), data + value)

    # complex array + complex array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArrayComplexDouble(shape)
    cArray2 = NumCpp.NdArrayComplexDouble(shape)
    real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data1 = real1 + 1j * imag1
    real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data2 = real2 + 1j * imag2
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)

    # complex array + complex scaler
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(cArray, value), data + value)

    # complex scaler + complex array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(value, cArray), data + value)

    # complex array + real array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArrayComplexDouble(shape)
    cArray2 = NumCpp.NdArray(shape)
    real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data1 = real1 + 1j * imag1
    data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)

    # real array + complex array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArray(shape)
    cArray2 = NumCpp.NdArrayComplexDouble(shape)
    data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data2 = real2 + 1j * imag2
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)

    # real array + complex scaler
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(cArray, value), data + value)

    # complex scaler + real array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(value, cArray), data + value)

    # complex array + real scaler
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    value = np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(cArray, value), data + value)

    # real scaler + complex array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    value = np.random.randint(-100, 100)
    assert np.array_equal(NumCpp.add(value, cArray), data + value)

    # complex array + complex array
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArrayComplexDouble(shape)
    cArray2 = NumCpp.NdArrayComplexDouble(shape)
    real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data1 = real1 + 1j * imag1
    real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data2 = real2 + 1j * imag2
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)


####################################################################################
def test_alen():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert NumCpp.alen(cArray) == shape.rows


####################################################################################
def test_all():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))


####################################################################################
def test_allclose():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArray(shape)
    cArray2 = NumCpp.NdArray(shape)
    cArray3 = NumCpp.NdArray(shape)
    tolerance = 1e-5
    data1 = np.random.randn(shape.rows, shape.cols)
    data2 = data1 + tolerance / 10
    data3 = data1 + 1
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    cArray3.setArray(data3)
    assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
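# The dtype variants above repeat the same arrange/act/assert steps. A possible
# consolidation (an illustrative sketch only, assuming pytest is available; the
# leading underscore keeps it out of test collection, and it is not part of the
# original suite):
import pytest


@pytest.mark.parametrize("complex_valued", [False, True])
def _sketch_add_roundtrip(complex_valued):
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    if complex_valued:
        cArray1, cArray2 = NumCpp.NdArrayComplexDouble(shape), NumCpp.NdArrayComplexDouble(shape)
        data1 = np.random.randint(1, 100, [shape.rows, shape.cols]) + \
            1j * np.random.randint(1, 100, [shape.rows, shape.cols])
        data2 = np.random.randint(1, 100, [shape.rows, shape.cols]) + \
            1j * np.random.randint(1, 100, [shape.rows, shape.cols])
    else:
        cArray1, cArray2 = NumCpp.NdArray(shape), NumCpp.NdArray(shape)
        data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
        data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)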
####################################################################################
def test_amax():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))


####################################################################################
def test_amin():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))


####################################################################################
def test_angle():
    components = np.random.randint(-100, -1, [2, ]).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9)  # noqa

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
        1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))


####################################################################################
def test_any():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))


####################################################################################
def test_append():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArray(shape)
    cArray2 = NumCpp.NdArray(shape)
    data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
    data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
                          np.append(data1, data2))

    shapeInput = np.random.randint(20, 100, [2, ])
    numRows = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
    cArray1 = NumCpp.NdArray(shape1)
    cArray2 = NumCpp.NdArray(shape2)
    data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
    data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
                          np.append(data1, data2, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
    cArray1 = NumCpp.NdArray(shape1)
    cArray2 = NumCpp.NdArray(shape2)
    data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
    data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
                          np.append(data1, data2, axis=1))


####################################################################################
def test_arange():
    start = np.random.randn(1).item()
    stop = np.random.randn(1).item() * 100
    step = np.abs(np.random.randn(1).item())
    if stop < start:
        step *= -1
    data = np.arange(start, stop, step)
    assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))


####################################################################################
def test_arccos():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)

    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))


####################################################################################
def test_arccosh():
    value = np.abs(np.random.rand(1).item()) + 1
    assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)

    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))


####################################################################################
def test_arcsin():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)

    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))


####################################################################################
def test_arcsinh():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)

    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
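# Note on the comparison idiom used throughout: rounding both sides to nine
# decimal places approximates an absolute-tolerance check. A helper making that
# intent explicit (an illustrative sketch, not part of the original suite;
# behaviour matches the rounding form up to values right on a rounding boundary):
def _close_to_nine_places(lhs, rhs):
    return np.allclose(lhs, rhs, rtol=0.0, atol=1e-9)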
####################################################################################
def test_arctan():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)

    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))


####################################################################################
def test_arctan2():
    xy = np.random.rand(2) * 2 - 1
    assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArrayX = NumCpp.NdArray(shape)
    cArrayY = NumCpp.NdArray(shape)
    xy = np.random.rand(*shapeInput, 2) * 2 - 1
    xData = xy[:, :, 0].reshape(shapeInput)
    yData = xy[:, :, 1].reshape(shapeInput)
    cArrayX.setArray(xData)
    cArrayY.setArray(yData)
    assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9),
                          np.round(np.arctan2(yData, xData), 9))


####################################################################################
def test_arctanh():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)

    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))


####################################################################################
def test_argmax():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))


####################################################################################
def test_argmin():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))


####################################################################################
def test_argsort():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    dataFlat = data.flatten()
    assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
                          dataFlat[np.argsort(data, axis=None)])

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    dataFlat = data.flatten()
    assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
                          dataFlat[np.argsort(data, axis=None)])

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    pIdx = np.argsort(data, axis=0)
    cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
    allPass = True
    for idx, row in enumerate(data.T):
        if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
            allPass = False
            break
    assert allPass

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    pIdx = np.argsort(data, axis=0)
    cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
    allPass = True
    for idx, row in enumerate(data.T):
        if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
            allPass = False
            break
    assert allPass

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    pIdx = np.argsort(data, axis=1)
    cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
    allPass = True
    for idx, row in enumerate(data):
        if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):  # noqa
            allPass = False
            break
    assert allPass

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    cArray.setArray(data)
    pIdx = np.argsort(data, axis=1)
    cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
    allPass = True
    for idx, row in enumerate(data):
        if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
            allPass = False
            break
    assert allPass
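# The per-row/per-column loops above compare reordered *values* rather than raw
# indices, which keeps the check stable when ties make several index
# permutations equally valid. The same check can be vectorised with
# np.take_along_axis (an illustrative helper, not part of the original suite):
def _argsort_matches_numpy(data, cIdx, axis):
    gathered = np.take_along_axis(data, cIdx.astype(np.intp), axis=axis)
    expected = np.take_along_axis(data, np.argsort(data, axis=axis), axis=axis)
    return np.array_equal(gathered, expected)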
####################################################################################
def test_argwhere():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols])
    randValue = np.random.randint(0, 100, [1, ]).item()
    data2 = data > randValue
    cArray.setArray(data2)
    assert np.array_equal(NumCpp.argwhere(cArray).flatten(),
                          np.argwhere(data.flatten() > randValue).flatten())

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayComplexDouble(shape)
    real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
    data = real + 1j * imag
    randValue = np.random.randint(0, 100, [1, ]).item()
    data2 = data > randValue
    cArray.setArray(data2)
    assert np.array_equal(NumCpp.argwhere(cArray).flatten(),
                          np.argwhere(data.flatten() > randValue).flatten())


####################################################################################
def test_around():
    value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
    numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
    assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
    cArray.setArray(data)
    numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
    assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))


####################################################################################
def test_array_equal():
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArray(shape)
    cArray2 = NumCpp.NdArray(shape)
    cArray3 = NumCpp.NdArray(shape)
    data1 = np.random.randint(1, 100, shapeInput)
    data2 = np.random.randint(1, 100, shapeInput)
    cArray1.setArray(data1)
    cArray2.setArray(data1)
    cArray3.setArray(data2)
    assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray1 = NumCpp.NdArrayComplexDouble(shape)
    cArray2 = NumCpp.NdArrayComplexDouble(shape)
    cArray3 = NumCpp.NdArrayComplexDouble(shape)
    real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data1 = real1 + 1j * imag1
    real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
    data2 = real2 + 1j * imag2
    cArray1.setArray(data1)
    cArray2.setArray(data1)
    cArray3.setArray(data2)
    assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)


####################################################################################
def test_array_equiv():
    shapeInput1 = np.random.randint(1, 100, [2, ])
    shapeInput3 = np.random.randint(1, 100, [2, ])
    shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
    shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
    shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
    cArray1 = NumCpp.NdArray(shape1)
    cArray2 = NumCpp.NdArray(shape2)
    cArray3 = NumCpp.NdArray(shape3)
    data1 = np.random.randint(1, 100, shapeInput1)
    data3 = np.random.randint(1, 100, shapeInput3)
    cArray1.setArray(data1)
    cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
    cArray3.setArray(data3)
    assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)

    shapeInput1 = np.random.randint(1, 100, [2, ])
    shapeInput3 = np.random.randint(1, 100, [2, ])
    shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
    shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
    shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
    cArray1 = NumCpp.NdArrayComplexDouble(shape1)
    cArray2 = NumCpp.NdArrayComplexDouble(shape2)
    cArray3 = NumCpp.NdArrayComplexDouble(shape3)
    real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
    imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
    data1 = real1 + 1j * imag1
    real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
    imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
    data3 = real3 + 1j * imag3
    cArray1.setArray(data1)
    cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
    cArray3.setArray(data3)
    assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)


####################################################################################
def test_asarray():
    values = np.random.randint(0, 100, [2, ]).astype(np.double)
    assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)

    real = np.random.randint(0, 100, [2, ]).astype(np.double)
    imag = np.random.randint(0, 100, [2, ]).astype(np.double)
    values = real + 1j * imag
    assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)

    values = np.random.randint(0, 100, [2, ]).astype(np.double)
    assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)

    real = np.random.randint(0, 100, [2, ]).astype(np.double)
    imag = np.random.randint(0, 100, [2, ]).astype(np.double)
    values = real + 1j * imag
    assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)

    values = np.random.randint(0, 100, [2, ]).astype(np.double)
    data = np.vstack([values, values])
    assert np.array_equal(NumCpp.asarrayArray2D(*values), data)

    real = np.random.randint(0, 100, [2, ]).astype(np.double)
    imag = np.random.randint(0, 100, [2, ]).astype(np.double)
    values = real + 1j * imag
    data = np.vstack([values, values])
    assert np.array_equal(NumCpp.asarrayArray2D(*values), data)

    values = np.random.randint(0, 100, [2, ]).astype(np.double)
    data = np.vstack([values, values])
    assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)

    real = np.random.randint(0, 100, [2, ]).astype(np.double)
    imag = np.random.randint(0, 100, [2, ]).astype(np.double)
    values = real + 1j * imag
    data =
np.vstack([values, values])
numpy.vstack
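# Editorial note, not part of the dataset row above: a minimal, self-contained
# sketch of numpy.vstack, the API this row's completion targets. The input
# arrays here are hypothetical, not taken from the prompt.
import numpy as np

a = np.array([1.0, 2.0])
b = np.array([3.0, 4.0])
stacked = np.vstack([a, b])  # 1-D inputs become the rows of a (2, 2) array
assert stacked.shape == (2, 2)
assert np.array_equal(stacked[1], b)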
""" Distributions for single variables. """ import math import copy import numpy as np import scipy.stats as sts from abc import ABC, abstractmethod from scipy.optimize import fmin __all__ = [ "WeibullDistribution", "LogNormalDistribution", "NormalDistribution", "ExponentiatedWeibullDistribution", "GeneralizedGammaDistribution", ] # The distributions parameters need to have an order, this order is defined by # the parameters dict. As of Python 3.7 dicts officially keep their order of creation. # So this version is a requirement. # (Though the dict order might work as well in 3.6) class ConditionalDistribution: """ A conditional probability distribution. The conditional distribution uses a Distribution as template and dynamically alters its parameters to model the dependence. The ConditionalDistribution wraps another distribution. When a method of the ConditionalDistribution is called it first computes the distributions parameters at given and then calls the corresponding method of the distribution with these parameters. Usually the parameters are defined by dependence functions of the form dep_func(given) -> param_val. Parameters ---------- distribution : Distribution The distribution used as template. Its parameters can be replaced with dependence functions to model the dependency. parameters: float A dictionary describing the parameters of distribution. The keys are the parameter names, the values are the dependence functions. Every parameter that is not fixed in distribution has to be set here. Attributes ---------- distribution_class : type The class of the distribution used. param_names : list-like Names of the parameters of the distribution. conditional_parameters : dict Dictionary of dependence functions for conditional parameters. Parameter names as keys. fixed_parameters : dict Values of the fixed parameters. The fixed parameters do not change, even when fitting them. Parameters as keys. distributions_per_interval : list Instances of distribution fitted to intervals parameters_per_interval : list of dict Values of the parameters of the distribution function. Parameter names as keys. data_intervals : list of array The data that was used to fit the distribution. Split into intervals. conditioning_values : array_like Realizations of the conditioning variable that where used for fitting. conditioning_interval_boundaries : list of tuple Boundaries of the intervals the data of the conditioning variable was split into. """ def __init__(self, distribution, parameters): # allow setting fitting initials on class creation? self.distribution = distribution self.distribution_class = distribution.__class__ self.param_names = distribution.parameters.keys() self.conditional_parameters = {} self.fixed_parameters = {} self.conditioning_values = None # TODO check that dependency functions are not duplicates # Check if the parameters dict contains keys/parameters that # are not known to the distribution. # (use set operations for that purpose) unknown_params = set(parameters).difference(self.param_names) if len(unknown_params) > 0: raise ValueError( "Unknown param(s) in parameters." f"Known params are {self.param_names}, " f"but found {unknown_params}." ) for par_name in self.param_names: # is the parameter defined as a dependence function? 
if par_name not in parameters: # if it is not a dependence function it must be fixed if getattr(distribution, f"f_{par_name}") is None: raise ValueError( "Parameters of the distribution must be " "either defined by a dependence function " f"or fixed, but {par_name} was not defined." ) else: self.fixed_parameters[par_name] = getattr( distribution, f"f_{par_name}" ) else: # if it is a dependence function it must not be fixed if getattr(distribution, f"f_{par_name}") is not None: raise ValueError( "Parameters can be defined by a " "dependence function or by being fixed. " f"But for parameter {par_name} both where given." ) else: self.conditional_parameters[par_name] = parameters[par_name] def __repr__(self): dist = "Conditional" + self.distribution_class.__name__ fixed_params = ", ".join( [ f"f_{par_name}={par_value}" for par_name, par_value in self.fixed_parameters.items() ] ) cond_params = ", ".join( [ f"{par_name}={repr(par_value)}" for par_name, par_value in self.conditional_parameters.items() ] ) combined_params = fixed_params + ", " + cond_params combined_params = combined_params.strip(", ") return f"{dist}({combined_params})" def _get_param_values(self, given): param_values = {} for par_name in self.param_names: if par_name in self.conditional_parameters.keys(): param_values[par_name] = self.conditional_parameters[par_name](given) else: param_values[par_name] = self.fixed_parameters[par_name] return param_values def pdf(self, x, given): """ Probability density function for the described random variable. With x, value(s) from the sample space of this random variable and given value(s) from the sample space of the conditioning random variable, pdf(x, given) returns the probability density function at x conditioned on given. Parameters ---------- x : array_like Points at which the pdf is evaluated. Shape: 1- dimensional. given : float or array_like The conditioning value of the conditioning variable i.e. the y in x|y. Shape: 1-dimensional. Same size as x. Returns ------- ndarray Probability densities at x conditioned on given. Shape: 1- dimensional. Same size as x. """ return self.distribution.pdf(x, **self._get_param_values(given)) def cdf(self, x, given): """ Cumulative distribution function for the described random variable. With x, a realization of this random variable and given a realisation of the conditioning random variable, cdf(x, given) returns the cumulative distribution function at x conditioned on given. Parameters ---------- x : array_like Points at which the cdf is evaluated. Shape: 1-dimensional. given : float or array_like The conditioning value of the conditioning variable i.e. the y in x|y. Shape: 1-dimensional. Same size as x. Returns ------- ndarray Cumulative distribution function evaluated at x. Shape: 1-dimensional. Same size as x. """ return self.distribution.cdf(x, **self._get_param_values(given)) def icdf(self, prob, given): """ Inverse cumulative distribution function. Calculate the inverse cumulative distribution function. Also known as quantile or percent-point function. With x, a realization of this random variable and given a realisation of the conditioning random variable, icdf(x, given) returns the inverse cumulative distribution function at x conditioned on given. Parameters ---------- prob : Probabilities for which the i_cdf is evaluated. Shape: 1-dimensional given : float or array_like The conditioning value of the conditioning variable i.e. the y in x|y. Shape: 1-dimensional. Same size as prob. 
Returns ------- ndarray or float Inverse cumulative distribution function evaluated at given probabilities conditioned on given. Shape: 1-dimensional. Same size as prob. """ return self.distribution.icdf(prob, **self._get_param_values(given)) def draw_sample(self, n, given): """ Draw a random sample of size n, conditioned on given. Parameters ---------- n : float Number of observations that shall be drawn. given : float or array_like The conditioning value of the conditioning variable i.e. the y in x|y. Shape: TODO Returns ------- ndarray or float Sample of the requested size conditioned on given. """ return self.distribution.draw_sample(n, **self._get_param_values(given)) def fit( self, data, conditioning_values, conditioning_interval_boundaries, method=None, weights=None, ): """ Fit statistical distribution to data. Method of estimating the parameters of a probability distribution to given data. Parameters ---------- data : list of array The data that should be used to fit the distribution. Realizations of the distributions variable split into intervals. One array for each interval containing the data in that interval. conditioning_values : array_like Realizations of the conditioning variable i.e. the y in x|y. One value for each interval in data. conditioning_interval_boundaries : list of tuple Boundaries of the intervals the data of the conditioning variable was split into. One 2-element tuple for each interval in data. method : str, optional The method used to fit the distributions (self.distribution) for each interval. Defaults to the distributions default. weights : The weights used to fit the distributions (self.distribution) for each interval, when method is 'wlsq' = weighted least squares. """ self.distributions_per_interval = [] self.parameters_per_interval = [] self.data_intervals = data self.conditioning_values = np.array(conditioning_values) self.conditioning_interval_boundaries = conditioning_interval_boundaries # Fit distribution to each interval. for interval_data in data: # dist = self.distribution_class() dist = copy.deepcopy(self.distribution) dist.fit(interval_data, method, weights) self.distributions_per_interval.append(dist) self.parameters_per_interval.append(dist.parameters) # Fit dependence functions. fitted_dependence_functions = {} for par_name, dep_func in self.conditional_parameters.items(): x = self.conditioning_values y = [params[par_name] for params in self.parameters_per_interval] dep_func.fit(x, y) fitted_dependence_functions[par_name] = dep_func self.conditional_parameters = fitted_dependence_functions class Distribution(ABC): """ Abstract base class for distributions. Models the probabilities of occurrence for different possible (environmental) events. """ def __repr__(self): dist_name = self.__class__.__name__ param_names = self.parameters.keys() set_params = {} for par_name in param_names: f_attr = getattr(self, f"f_{par_name}") if f_attr is not None: set_params[f"f_{par_name}"] = f_attr else: set_params[par_name] = getattr(self, par_name) params = ", ".join( [f"{par_name}={par_value}" for par_name, par_value in set_params.items()] ) return f"{dist_name}({params})" @property @abstractmethod def parameters(self): """ Parameters of the probability distribution. Dict of the form: {"<parameter_name>" : <parameter_value>, ...} """ return {} @abstractmethod def cdf(self, x, *args, **kwargs): """ Cumulative distribution function. """ @abstractmethod def pdf(self, x, *args, **kwargs): """ Probability density function. 
""" @abstractmethod def icdf(self, prob, *args, **kwargs): """ Inverse cumulative distribution function. """ @abstractmethod def draw_sample(self, n, *args, **kwargs): """ Draw a random sample of length n. """ def fit(self, data, method="mle", weights=None): """ Fit the distribution to the sampled data. data : array_like The observed data to fit the distribution. method : str, optional The method used for fitting. Defaults to 'mle' = maximum-likelihood estimation. Other options are 'lsq' / 'wlsq' for (weighted) least squares. weights : None, str, array_like, The weights to use for weighted least squares fitting. Ignored otherwise. Defaults to None = equal weights. Can be either an array_like with one weight for each point in data or a str. Valid options for str are: 'linear', 'quadratic', 'cubic'. """ if method.lower() == "mle": self._fit_mle(data) elif method.lower() == "lsq" or method.lower() == "wlsq": self._fit_lsq(data, weights) else: raise ValueError( f"Unknown fit method '{method}'. " "Only maximum likelihood estimation (keyword: mle) " "and (weighted) least squares (keyword: (w)lsq) are supported." ) @abstractmethod def _fit_mle(self, data): """Fit the distribution using maximum likelihood estimation.""" @abstractmethod def _fit_lsq(self, data, weights): """Fit the distribution using (weighted) least squares.""" @staticmethod def _get_rvs_size(n, pars): # Returns the size parameter for the scipy rvs method. # If there are any iterable pars it is a tuple, # otherwise n is returned. at_least_one_iterable = False par_length = 0 for par in pars: try: _ = iter(par) at_least_one_iterable = True par_length = len(par) except TypeError: pass if at_least_one_iterable: return (n, par_length) else: return n class WeibullDistribution(Distribution): """ A weibull distribution. The distributions probability density function is given by [1]_ : :math:`f(x) = \\frac{\\beta}{\\alpha} \\left (\\frac{x-\\gamma}{\\alpha} \\right)^{\\beta -1} \\exp \\left[-\\left( \\frac{x-\\gamma}{\\alpha} \\right)^{\\beta} \\right]` Parameters ---------- alpha : float Scale parameter of the weibull distribution. Defaults to 1. beta : float Shape parameter of the weibull distribution. Defaults to 1. gamma : float Location parameter of the weibull distribution (3-parameter weibull distribution). Defaults to 0. f_alpha : float Fixed scale parameter of the weibull distribution (e.g. given physical parameter). If this parameter is set, lambda is ignored. The fixed parameter does not change, even when fitting it. Defaults to None. f_beta : float Fixed shape parameter of the weibull distribution (e.g. given physical parameter). If this parameter is set, k is ignored. The fixed parameter does not change, even when fitting it. Defaults to None. f_gamma : float Fixed location parameter of the weibull distribution (e.g. given physical parameter). If this parameter is set, theta is ignored. The fixed parameter does not change, even when fitting it. Defaults to None. References ---------- .. [1] <NAME>.; <NAME>.; <NAME>.; <NAME>.(2017) Deriving environmental contours from highest density regions. Coastal Engineering 123 (2017) 42–51. 
""" def __init__( self, alpha=1, beta=1, gamma=0, f_alpha=None, f_beta=None, f_gamma=None ): # TODO set parameters to fixed values if provided self.alpha = alpha # scale self.beta = beta # shape self.gamma = gamma # loc self.f_alpha = f_alpha self.f_beta = f_beta self.f_gamma = f_gamma @property def parameters(self): return {"alpha": self.alpha, "beta": self.beta, "gamma": self.gamma} def _get_scipy_parameters(self, alpha, beta, gamma): if alpha is None: alpha = self.alpha if beta is None: beta = self.beta if gamma is None: gamma = self.gamma return beta, gamma, alpha # shape, loc, scale def cdf(self, x, alpha=None, beta=None, gamma=None): """ Cumulative distribution function. Parameters ---------- x : array_like, Points at which the cdf is evaluated. Shape: 1-dimensional. alpha : float, optional The scale parameter. Defaults to self.alpha. beta : float, optional The shape parameter. Defaults to self.beta. gamma: float, optional The location parameter . Defaults to self.gamma. """ scipy_par = self._get_scipy_parameters(alpha, beta, gamma) return sts.weibull_min.cdf(x, *scipy_par) def icdf(self, prob, alpha=None, beta=None, gamma=None): """ Inverse cumulative distribution function. Parameters ---------- prob : array_like Probabilities for which the i_cdf is evaluated. Shape: 1-dimensional alpha : float, optional The scale parameter. Defaults to self.aplha . beta : float, optional The shape parameter. Defaults to self.beta. gamma: float, optional The location parameter . Defaults to self.gamma. """ scipy_par = self._get_scipy_parameters(alpha, beta, gamma) return sts.weibull_min.ppf(prob, *scipy_par) def pdf(self, x, alpha=None, beta=None, gamma=None): """ Probability density function. Parameters ---------- x : array_like, Points at which the pdf is evaluated. Shape: 1-dimensional. alpha_ : float, optional The scale parameter. Defaults to self.alpha. beta : float, optional The shape parameter. Defaults to self.beta. gamma: float, optional The location parameter . Defaults to self.gamma. """ scipy_par = self._get_scipy_parameters(alpha, beta, gamma) return sts.weibull_min.pdf(x, *scipy_par) def draw_sample(self, n, alpha=None, beta=None, gamma=None): scipy_par = self._get_scipy_parameters(alpha, beta, gamma) rvs_size = self._get_rvs_size(n, scipy_par) return sts.weibull_min.rvs(*scipy_par, size=rvs_size) def _fit_mle(self, sample): p0 = {"alpha": self.alpha, "beta": self.beta, "gamma": self.gamma} fparams = {} if self.f_beta is not None: fparams["f0"] = self.f_beta if self.f_gamma is not None: fparams["floc"] = self.f_gamma if self.f_alpha is not None: fparams["fscale"] = self.f_alpha self.beta, self.gamma, self.alpha = sts.weibull_min.fit( sample, p0["beta"], loc=p0["gamma"], scale=p0["alpha"], **fparams ) def _fit_lsq(self, data, weights): raise NotImplementedError() class LogNormalDistribution(Distribution): """ A Lognormal Distribution. The distributions probability density function is given by [2]_: :math:`f(x) = \\frac{1}{x\\widetilde{\\sigma} \\sqrt{2\\pi}}\\exp \\left[ \\frac{-(\\ln x - \\widetilde{\\mu})^2}{2\\widetilde{\\sigma}^2}\\right]` Parameters ---------- mu : float Mean parameter of the corresponding normal distribution. Defaults to 0. sigma : float Standard deviation of the corresponding normal distribution. Defaults to 1. f_mu : float Fixed parameter mu of the lognormal distribution (e.g. given physical parameter). If this parameter is set, mu is ignored. The fixed parameter does not change, even when fitting it. Defaults to None. 
f_sigma : float Fixed parameter sigma of the lognormal distribution (e.g. given physical parameter). If this parameter is set, sigma is ignored. The fixed parameter does not change, even when fitting it. Defaults to None. References ---------- .. [2] <NAME>.; <NAME>.; <NAME>; <NAME>. (2011) Statistical Distributions, 4th Edition, Published by John Wiley & Sons, Inc., Hoboken, New Jersey., Pages 131-132 """ def __init__(self, mu=0, sigma=1, f_mu=None, f_sigma=None): self.mu = mu self.sigma = sigma # shape self.f_mu = f_mu self.f_sigma = f_sigma # self.scale = math.exp(mu) @property def parameters(self): return {"mu": self.mu, "sigma": self.sigma} @property def _scale(self): return np.exp(self.mu) @_scale.setter def _scale(self, val): self.mu = np.log(val) def _get_scipy_parameters(self, mu, sigma): if mu is None: scale = self._scale else: scale = np.exp(mu) if sigma is None: sigma = self.sigma return sigma, 0, scale # shape, loc, scale def cdf(self, x, mu=None, sigma=None): """ Cumulative distribution function. Parameters ---------- x : array_like, Points at which the cdf is evaluated. Shape: 1-dimensional. mu : float, optional The variance parameter. Defaults to self.mu . sigma : float, optional The shape parameter. Defaults to self.sigma . """ scipy_par = self._get_scipy_parameters(mu, sigma) return sts.lognorm.cdf(x, *scipy_par) def icdf(self, prob, mu=None, sigma=None): """ Inverse cumulative distribution function. Parameters ---------- prob : Probabilities for which the i_cdf is evaluated. Shape: 1-dimensional mu : float, optional The variance parameter. Defaults to self.mu . sigma : float, optional The shape parameter. Defaults to self.sigma . """ scipy_par = self._get_scipy_parameters(mu, sigma) return sts.lognorm.ppf(prob, *scipy_par) def pdf(self, x, mu=None, sigma=None): """ Probability density function. Parameters ---------- x : array_like, Points at which the pdf is evaluated. Shape: 1-dimensional. mu : float, optional The variance parameter. Defaults to self.mu . sigma : float, optional The shape parameter. Defaults to self.sigma . """ scipy_par = self._get_scipy_parameters(mu, sigma) return sts.lognorm.pdf(x, *scipy_par) def draw_sample(self, n, mu=None, sigma=None): scipy_par = self._get_scipy_parameters(mu, sigma) rvs_size = self._get_rvs_size(n, scipy_par) return sts.lognorm.rvs(*scipy_par, size=rvs_size) def _fit_mle(self, sample): p0 = {"scale": self._scale, "sigma": self.sigma} fparams = {"floc": 0} if self.f_sigma is not None: fparams["f0"] = self.f_sigma if self.f_mu is not None: fparams["fscale"] = math.exp(self.f_mu) # scale0 = math.exp(p0["mu"]) self.sigma, _, self._scale = sts.lognorm.fit( sample, p0["sigma"], scale=p0["scale"], **fparams ) # self.mu = math.log(self._scale) def _fit_lsq(self, data, weights): raise NotImplementedError() class NormalDistribution(Distribution): """ A Normal (Gaussian) Distribution. The distributions probability density function is given by [3]_: :math:`f(x) = \\frac{1}{{\\sigma} \\sqrt{2\\pi}} \\exp \\left( - \\frac{( x - \\mu)^2}{2\\sigma^2}\\right)` Parameters ---------- mu : float Location parameter, the mean. Defaults to 0. sigma : float Scale parameter, the standard deviation. Defaults to 1. f_mu : float Fixed parameter mu of the normal distribution (e.g. given physical parameter). If this parameter is set, mu is ignored. The fixed parameter does not change, even when fitting it. Defaults to None. f_sigma : float Fixed parameter sigma of the normal distribution (e.g. given physical parameter). 
        If this parameter is set, sigma is ignored.
        Defaults to None.

    References
    ----------
    .. [3] <NAME>.; <NAME>.; <NAME>; <NAME>. (2011)
        Statistical Distributions, 4th Edition, Published by
        John Wiley & Sons, Inc., Hoboken, New Jersey., Page 143

    """

    def __init__(self, mu=0, sigma=1, f_mu=None, f_sigma=None):
        self.mu = mu  # location
        self.sigma = sigma  # scale
        self.f_mu = f_mu
        self.f_sigma = f_sigma

    @property
    def parameters(self):
        return {"mu": self.mu, "sigma": self.sigma}

    def _get_scipy_parameters(self, mu, sigma):
        if mu is None:
            loc = self.mu
        else:
            loc = mu
        if sigma is None:
            scale = self.sigma
        else:
            scale = sigma
        return loc, scale  # loc, scale

    def cdf(self, x, mu=None, sigma=None):
        """
        Cumulative distribution function.

        Parameters
        ----------
        x : array_like,
            Points at which the cdf is evaluated.
            Shape: 1-dimensional.
        mu : float, optional
            The location parameter. Defaults to self.mu.
        sigma : float, optional
            The scale parameter. Defaults to self.sigma.
        """
        scipy_par = self._get_scipy_parameters(mu, sigma)
        return sts.norm.cdf(x, *scipy_par)

    def icdf(self, prob, mu=None, sigma=None):
        """
        Inverse cumulative distribution function.

        Parameters
        ----------
        prob : array_like
            Probabilities for which the i_cdf is evaluated.
            Shape: 1-dimensional
        mu : float, optional
            The location parameter. Defaults to self.mu.
        sigma : float, optional
            The scale parameter. Defaults to self.sigma.
        """
        scipy_par = self._get_scipy_parameters(mu, sigma)
        return sts.norm.ppf(prob, *scipy_par)

    def pdf(self, x, mu=None, sigma=None):
        """
        Probability density function.

        Parameters
        ----------
        x : array_like,
            Points at which the pdf is evaluated.
            Shape: 1-dimensional.
        mu : float, optional
            The location parameter. Defaults to self.mu.
        sigma : float, optional
            The scale parameter. Defaults to self.sigma.
        """
        scipy_par = self._get_scipy_parameters(mu, sigma)
        return sts.norm.pdf(x, *scipy_par)

    def draw_sample(self, n, mu=None, sigma=None):
        scipy_par = self._get_scipy_parameters(mu, sigma)
        rvs_size = self._get_rvs_size(n, scipy_par)
        return sts.norm.rvs(*scipy_par, size=rvs_size)

    def _fit_mle(self, sample):
        p0 = {"loc": self.mu, "scale": self.sigma}
        fparams = {}
        if self.f_mu is not None:
            fparams["floc"] = self.f_mu
        if self.f_sigma is not None:
            fparams["fscale"] = self.f_sigma
        self.mu, self.sigma = sts.norm.fit(
            sample, loc=p0["loc"], scale=p0["scale"], **fparams
        )

    def _fit_lsq(self, data, weights):
        raise NotImplementedError()


class LogNormalNormFitDistribution(LogNormalDistribution):
    # https://en.wikipedia.org/wiki/Log-normal_distribution#Estimation_of_parameters
    """
    A Lognormal Distribution.

    The distribution's probability density function is given by:

    :math:`f(x) = \\frac{1}{x\\widetilde{\\sigma} \\sqrt{2\\pi}}\\exp \\left[ \\frac{-(\\ln x - \\widetilde{\\mu})^2}{2\\widetilde{\\sigma}^2}\\right]`

    Parameters
    ----------
    mu : float
        Mean parameter of the corresponding normal distribution.
        Defaults to 0.
    sigma : float
        Variance parameter of the corresponding normal distribution.
        Defaults to 1.
    f_mu : float
        Fixed parameter mu of the lognormal distribution (e.g. given physical
        parameter). If this parameter is set, mu is ignored. The fixed
        parameter does not change, even when fitting it. Defaults to None.
    f_sigma : float
        Fixed parameter sigma of the lognormal distribution (e.g. given
        physical parameter). If this parameter is set, sigma is ignored.
        The fixed parameter does not change, even when fitting it.
        Defaults to None.
""" def __init__(self, mu_norm=0, sigma_norm=1, f_mu_norm=None, f_sigma_norm=None): self.mu_norm = mu_norm self.sigma_norm = sigma_norm self.f_mu_norm = f_mu_norm self.f_sigma_norm = f_sigma_norm @property def parameters(self): return {"mu_norm": self.mu_norm, "sigma_norm": self.sigma_norm} @property def mu(self): return self.calculate_mu(self.mu_norm, self.sigma_norm) @staticmethod def calculate_mu(mu_norm, sigma_norm): return np.log(mu_norm / np.sqrt(1 + sigma_norm ** 2 / mu_norm ** 2)) # return np.log(mu_norm**2 * np.sqrt(1 / (sigma_norm**2 + mu_norm**2))) @property def sigma(self): return self.calculate_sigma(self.mu_norm, self.sigma_norm) @staticmethod def calculate_sigma(mu_norm, sigma_norm): # return np.sqrt(np.log(1 + sigma_norm**2 / mu_norm**2)) return np.sqrt(np.log(1 + (sigma_norm ** 2 / mu_norm ** 2))) def _get_scipy_parameters(self, mu_norm, sigma_norm): if (mu_norm is None) != (sigma_norm is None): raise RuntimeError( "mu_norm and sigma_norm have to be passed both or not at all" ) if mu_norm is None: scale = self._scale sigma = self.sigma else: sigma = self.calculate_sigma(mu_norm, sigma_norm) mu = self.calculate_mu(mu_norm, sigma_norm) scale = np.exp(mu) return sigma, 0, scale # shape, loc, scale def cdf(self, x, mu_norm=None, sigma_norm=None): scipy_par = self._get_scipy_parameters(mu_norm, sigma_norm) return sts.lognorm.cdf(x, *scipy_par) def icdf(self, prob, mu_norm=None, sigma_norm=None): scipy_par = self._get_scipy_parameters(mu_norm, sigma_norm) return sts.lognorm.ppf(prob, *scipy_par) def pdf(self, x, mu_norm=None, sigma_norm=None): scipy_par = self._get_scipy_parameters(mu_norm, sigma_norm) return sts.lognorm.pdf(x, *scipy_par) def draw_sample(self, n, mu_norm=None, sigma_norm=None): scipy_par = self._get_scipy_parameters(mu_norm, sigma_norm) rvs_size = self._get_rvs_size(n, scipy_par) return sts.lognorm.rvs(*scipy_par, size=rvs_size) def _fit_mle(self, sample): if self.f_mu_norm is None: self.mu_norm =
np.mean(sample)
numpy.mean
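# Editorial note, not part of the dataset row above: numpy.mean as used in the
# completion (estimating mu_norm from a sample). The sample is hypothetical.
import numpy as np

sample = np.array([2.0, 4.0, 6.0])
assert np.mean(sample) == 4.0  # arithmetic mean over all elements
assert np.mean(sample.reshape(1, 3), axis=1)[0] == 4.0  # per-row mean via axis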
import csv
import numpy as np
import key
import time
import shutil
import os


def cal_ave(path):
    data = []
    with open(path, "r") as f:
        reader = csv.reader(f)
        num = 0
        for row in reader:
            if num == 0:
                num = 1
                continue
            row = row[6:]
            row = [float(i) for i in row]
            data.append(row)
    data = np.array(data[1:])
    data =
np.reshape(data, (-1, 6))
numpy.reshape
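# Editorial note, not part of the dataset row above: numpy.reshape with a -1
# placeholder, as in the completion. The data here is hypothetical.
import numpy as np

data = np.arange(12.0)
table = np.reshape(data, (-1, 6))  # -1 lets numpy infer the row count (2 here)
assert table.shape == (2, 6)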
# Get Python six functionality: from __future__ import absolute_import, division, print_function, unicode_literals import keras.layers import keras.models import numpy as np import pytest import innvestigate.tools.perturbate import innvestigate.utils as iutils ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### @pytest.mark.fast @pytest.mark.precommit def test_fast__PerturbationAnalysis(): # Some test data if keras.backend.image_data_format() == "channels_first": input_shape = (2, 1, 4, 4) else: input_shape = (2, 4, 4, 1) x = np.arange(2 * 4 * 4).reshape(input_shape) generator = iutils.BatchSequence([x, np.zeros(x.shape[0])], batch_size=x.shape[0]) # Simple model model = keras.models.Sequential( [ keras.layers.Flatten(input_shape=x.shape[1:]), keras.layers.Dense(1, use_bias=False), ] ) weights = np.arange(4 * 4 * 1).reshape((4 * 4, 1)) model.layers[-1].set_weights([weights]) model.compile(loss="mean_squared_error", optimizer="sgd") expected_output = np.array([[1240.0], [3160.0]]) assert np.all(np.isclose(model.predict(x), expected_output)) # Analyzer analyzer = innvestigate.create_analyzer("gradient", model, postprocess="abs") # Run perturbation analysis perturbation = innvestigate.tools.perturbate.Perturbation( "zeros", region_shape=(2, 2), in_place=False ) perturbation_analysis = innvestigate.tools.perturbate.PerturbationAnalysis( analyzer, model, generator, perturbation, recompute_analysis=False, steps=3, regions_per_step=1, verbose=False, ) scores = perturbation_analysis.compute_perturbation_analysis() expected_scores = np.array([5761600.0, 1654564.0, 182672.0, 21284.0]) assert np.all(np.isclose(scores, expected_scores)) @pytest.mark.fast @pytest.mark.precommit def test_fast__Perturbation(): if keras.backend.image_data_format() == "channels_first": input_shape = (1, 1, 4, 4) else: input_shape = (1, 4, 4, 1) x = np.arange(1 * 4 * 4).reshape(input_shape) perturbation = innvestigate.tools.perturbate.Perturbation( "zeros", region_shape=(2, 2), in_place=False ) analysis = np.zeros((4, 4)) analysis[:2, 2:] = 1 analysis[2:, :2] = 2 analysis[2:, 2:] = 3 analysis = analysis.reshape(input_shape) if keras.backend.image_data_format() == "channels_last": x = np.moveaxis(x, 3, 1) analysis = np.moveaxis(analysis, 3, 1) analysis = perturbation.reduce_function(analysis, axis=1, keepdims=True) aggregated_regions = perturbation.aggregate_regions(analysis) assert np.all( np.isclose(aggregated_regions[0, 0, :, :], np.array([[0, 1], [2, 3]])) ) ranks = perturbation.compute_region_ordering(aggregated_regions) assert np.all(np.isclose(ranks[0, 0, :, :],
np.array([[3, 2], [1, 0]])
numpy.array
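# Editorial note, not part of the dataset row above: numpy.array building the
# small 2-D literal used as the expected region ordering in the completion.
import numpy as np

ranks = np.array([[3, 2], [1, 0]])
assert ranks.shape == (2, 2)
assert ranks.dtype.kind == 'i'  # integer literals give an integer dtype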
""" # 3D brain mesh """ import numpy as np from nilearn import datasets from nilearn.surface import load_surf_data, load_surf_mesh, vol_to_surf from nilearn import plotting from datoviz import canvas, run, colormap # Get the data: fsaverage = datasets.fetch_surf_fsaverage() # Left hemisphere. mesh = load_surf_mesh(fsaverage['pial_left']) coords, faces = mesh[0], mesh[1] bg_data = load_surf_data(fsaverage['sulc_left']) # Right hemisphere. mesh = load_surf_mesh(fsaverage['pial_right']) coords2, faces2 = mesh[0], mesh[1] bg_data2 = load_surf_data(fsaverage['sulc_right']) # Concatenate. coords = np.vstack((coords, coords2)) faces = np.vstack((faces, faces2 + faces.max() + 1)) bg_data = np.concatenate((bg_data, bg_data2)) # Depth background data. bg_data = (bg_data - bg_data.min()) / (bg_data.max() - bg_data.min()) N = bg_data.shape[0] # HACK: uv tex coords to fetch the right colormap value. To be improved cmap = 0 uv = np.c_[bg_data,
np.ones(N)
numpy.ones
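# Editorial note, not part of the dataset row above: numpy.ones supplying one
# column of a texture-coordinate array, as in the completion. N is hypothetical.
import numpy as np

N = 4
uv = np.c_[np.linspace(0.0, 1.0, N), np.ones(N)]  # second column is all ones
assert uv.shape == (N, 2)
assert np.all(uv[:, 1] == 1.0)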
from __future__ import division, absolute_import, print_function import warnings import numpy as np from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal, assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings ) # Test data _ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) # Rows of _ndat with nans removed _rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), np.array([0.1042, -0.5954]), np.array([0.1610, 0.1859, 0.3146])] # Rows of _ndat with nans converted to ones _ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) # Rows of _ndat with nans converted to zeros _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) class TestNanFunctions_MinMax(TestCase): nanfuncs = [np.nanmin, np.nanmax] stdfuncs = [np.min, np.max] def test_mutation(self): # Check that passed array is not modified. ndat = _ndat.copy() for f in self.nanfuncs: f(ndat) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): for axis in [None, 0, 1]: tgt = rf(mat, axis=axis, keepdims=True) res = nf(mat, axis=axis, keepdims=True) assert_(res.ndim == tgt.ndim) def test_out(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): resout = np.zeros(3) tgt = rf(mat, axis=1) res = nf(mat, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_dtype_from_input(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: mat = np.eye(3, dtype=c) tgt = rf(mat, axis=1).dtype.type res = nf(mat, axis=1).dtype.type assert_(res is tgt) # scalar case tgt = rf(mat, axis=None).dtype.type res = nf(mat, axis=None).dtype.type assert_(res is tgt) def test_result_values(self): for nf, rf in zip(self.nanfuncs, self.stdfuncs): tgt = [rf(d) for d in _rdat] res = nf(_ndat, axis=1) assert_almost_equal(res, tgt) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for f in self.nanfuncs: for axis in [None, 0, 1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) # Check scalars with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(np.nan))) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) def test_masked(self): mat = np.ma.fix_invalid(_ndat) msk = mat._mask.copy() for f in [np.nanmin]: res = f(mat, axis=1) tgt = f(_ndat, axis=1) assert_equal(res, tgt) assert_equal(mat._mask, msk) assert_(not np.isinf(mat).any()) def test_scalar(self): for f in self.nanfuncs: assert_(f(0.) == 0.) 
def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(res.shape == (1, 3)) res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 1)) res = f(mat) assert_(np.isscalar(res)) # check that rows of nan are dealt with for subclasses (#4628) mat[1] = np.nan for f in self.nanfuncs: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(not np.any(np.isnan(res))) assert_(len(w) == 0) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) and not np.isnan(res[2, 0])) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(mat) assert_(np.isscalar(res)) assert_(res != np.nan) assert_(len(w) == 0) class TestNanFunctions_ArgminArgmax(TestCase): nanfuncs = [np.nanargmin, np.nanargmax] def test_mutation(self): # Check that passed array is not modified. ndat = _ndat.copy() for f in self.nanfuncs: f(ndat) assert_equal(ndat, _ndat) def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in") ind = f(row) val = row[ind] # comparing with NaN is tricky as the result # is always false except for NaN != NaN assert_(not np.isnan(val)) assert_(not fcmp(val, row).any()) assert_(not np.equal(val, row[:ind]).any()) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for f in self.nanfuncs: for axis in [None, 0, 1]: assert_raises(ValueError, f, mat, axis=axis) assert_raises(ValueError, f, np.nan) def test_empty(self): mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: assert_raises(ValueError, f, mat, axis=axis) for axis in [1]: res = f(mat, axis=axis) assert_equal(res, np.zeros(0)) def test_scalar(self): for f in self.nanfuncs: assert_(f(0.) == 0.) 
def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(res.shape == (1, 3)) res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 1)) res = f(mat) assert_(np.isscalar(res)) class TestNanFunctions_IntTypes(TestCase): int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64) mat = np.array([127, 39, 93, 87, 46]) def integer_arrays(self): for dtype in self.int_types: yield self.mat.astype(dtype) def test_nanmin(self): tgt = np.min(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanmin(mat), tgt) def test_nanmax(self): tgt = np.max(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanmax(mat), tgt) def test_nanargmin(self): tgt = np.argmin(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanargmin(mat), tgt) def test_nanargmax(self): tgt = np.argmax(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanargmax(mat), tgt) def test_nansum(self): tgt = np.sum(self.mat) for mat in self.integer_arrays(): assert_equal(np.nansum(mat), tgt) def test_nanprod(self): tgt = np.prod(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanprod(mat), tgt) def test_nancumsum(self): tgt = np.cumsum(self.mat) for mat in self.integer_arrays(): assert_equal(np.nancumsum(mat), tgt) def test_nancumprod(self): tgt = np.cumprod(self.mat) for mat in self.integer_arrays(): assert_equal(np.nancumprod(mat), tgt) def test_nanmean(self): tgt = np.mean(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanmean(mat), tgt) def test_nanvar(self): tgt = np.var(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanvar(mat), tgt) tgt = np.var(mat, ddof=1) for mat in self.integer_arrays(): assert_equal(np.nanvar(mat, ddof=1), tgt) def test_nanstd(self): tgt = np.std(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanstd(mat), tgt) tgt = np.std(self.mat, ddof=1) for mat in self.integer_arrays(): assert_equal(np.nanstd(mat, ddof=1), tgt) class SharedNanFunctionsTestsMixin(object): def test_mutation(self): # Check that passed array is not modified. 
ndat = _ndat.copy() for f in self.nanfuncs: f(ndat) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): for axis in [None, 0, 1]: tgt = rf(mat, axis=axis, keepdims=True) res = nf(mat, axis=axis, keepdims=True) assert_(res.ndim == tgt.ndim) def test_out(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): resout = np.zeros(3) tgt = rf(mat, axis=1) res = nf(mat, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_dtype_from_dtype(self): mat = np.eye(3) codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: with suppress_warnings() as sup: if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 sup.filter(np.ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) # scalar case tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type assert_(res is tgt) def test_dtype_from_char(self): mat = np.eye(3) codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: with suppress_warnings() as sup: if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 sup.filter(np.ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, axis=1).dtype.type assert_(res is tgt) # scalar case tgt = rf(mat, dtype=c, axis=None).dtype.type res = nf(mat, dtype=c, axis=None).dtype.type assert_(res is tgt) def test_dtype_from_input(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: mat = np.eye(3, dtype=c) tgt = rf(mat, axis=1).dtype.type res = nf(mat, axis=1).dtype.type assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) # scalar case tgt = rf(mat, axis=None).dtype.type res = nf(mat, axis=None).dtype.type assert_(res is tgt) def test_result_values(self): for nf, rf in zip(self.nanfuncs, self.stdfuncs): tgt = [rf(d) for d in _rdat] res = nf(_ndat, axis=1) assert_almost_equal(res, tgt) def test_scalar(self): for f in self.nanfuncs: assert_(f(0.) == 0.) 
def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(res.shape == (1, 3)) res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 1)) res = f(mat) assert_(np.isscalar(res)) class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin): nanfuncs = [np.nansum, np.nanprod] stdfuncs = [np.sum, np.prod] def test_allnans(self): # Check for FutureWarning with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = np.nansum([np.nan]*3, axis=None) assert_(res == 0, 'result is not 0') assert_(len(w) == 0, 'warning raised') # Check scalar res = np.nansum(np.nan) assert_(res == 0, 'result is not 0') assert_(len(w) == 0, 'warning raised') # Check there is no warning for not all-nan np.nansum([0]*3, axis=None) assert_(len(w) == 0, 'unwanted warning raised') def test_empty(self): for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): mat = np.zeros((0, 3)) tgt = [tgt_value]*3 res = f(mat, axis=0) assert_equal(res, tgt) tgt = [] res = f(mat, axis=1) assert_equal(res, tgt) tgt = tgt_value res = f(mat, axis=None) assert_equal(res, tgt) class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin): nanfuncs = [np.nancumsum, np.nancumprod] stdfuncs = [np.cumsum, np.cumprod] def test_allnans(self): for f, tgt_value in zip(self.nanfuncs, [0, 1]): # Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input with assert_no_warnings(): res = f([np.nan]*3, axis=None) tgt = tgt_value*np.ones((3)) assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value)) # Check scalar res = f(np.nan) tgt = tgt_value*np.ones((1)) assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value)) # Check there is no warning for not all-nan f([0]*3, axis=None) def test_empty(self): for f, tgt_value in zip(self.nanfuncs, [0, 1]): mat = np.zeros((0, 3)) tgt = tgt_value*np.ones((0, 3)) res = f(mat, axis=0) assert_equal(res, tgt) tgt = mat res = f(mat, axis=1) assert_equal(res, tgt) tgt = np.zeros((0)) res = f(mat, axis=None) assert_equal(res, tgt) def test_keepdims(self): for f, g in zip(self.nanfuncs, self.stdfuncs): mat = np.eye(3) for axis in [None, 0, 1]: tgt = f(mat, axis=axis, out=None) res = g(mat, axis=axis, out=None) assert_(res.ndim == tgt.ndim) for f in self.nanfuncs: d = np.ones((3, 5, 7, 11)) # Randomly set some elements to NaN: rs = np.random.RandomState(0) d[rs.rand(*d.shape) < 0.5] = np.nan res = f(d, axis=None) assert_equal(res.shape, (1155,)) for axis in np.arange(4): res = f(d, axis=axis) assert_equal(res.shape, (3, 5, 7, 11)) def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: for axis in np.arange(2): res = f(mat, axis=axis) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 3)) res = f(mat) assert_(res.shape == (1, 3*3)) def test_result_values(self): for axis in (-2, -1, 0, 1, None): tgt = np.cumprod(_ndat_ones, axis=axis) res = np.nancumprod(_ndat, axis=axis) assert_almost_equal(res, tgt) tgt = np.cumsum(_ndat_zeros,axis=axis) res = np.nancumsum(_ndat, axis=axis) assert_almost_equal(res, tgt) def test_out(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): resout = np.eye(3) for axis in (-2, -1, 0, 1): tgt = rf(mat, axis=axis) res = nf(mat, axis=axis, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, 
tgt) class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin): nanfuncs = [np.nanmean, np.nanvar, np.nanstd] stdfuncs = [np.mean, np.var, np.std] def test_dtype_error(self): for f in self.nanfuncs: for dtype in [np.bool_, np.int_, np.object_]: assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) def test_out_dtype_error(self): for f in self.nanfuncs: for dtype in [np.bool_, np.int_, np.object_]: out = np.empty(_ndat.shape[0], dtype=dtype) assert_raises(TypeError, f, _ndat, axis=1, out=out) def test_ddof(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in [0, 1]: tgt = [rf(d, ddof=ddof) for d in _rdat] res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) def test_ddof_too_big(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): with suppress_warnings() as sup: sup.record(RuntimeWarning) sup.filter(np.ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) if any(tgt): assert_(len(sup.log) == 1) else: assert_(len(sup.log) == 0) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for f in self.nanfuncs: for axis in [None, 0, 1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) # Check scalar assert_(np.isnan(f(np.nan))) assert_(len(w) == 2) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(mat, axis=axis), np.zeros([])) assert_(len(w) == 0) class TestNanFunctions_Median(TestCase): def test_mutation(self): # Check that passed array is not modified. 
ndat = _ndat.copy() np.nanmedian(ndat) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for axis in [None, 0, 1]: tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) assert_(res.ndim == tgt.ndim) d = np.ones((3, 5, 7, 11)) # Randomly set some elements to NaN: w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan with suppress_warnings() as sup: sup.filter(RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) assert_equal(res.shape, (1, 1, 7, 11)) res = np.nanmedian(d, axis=(0, 3), keepdims=True) assert_equal(res.shape, (1, 5, 7, 1)) res = np.nanmedian(d, axis=(1,), keepdims=True) assert_equal(res.shape, (3, 1, 7, 11)) res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) assert_equal(res.shape, (1, 1, 7, 1)) def test_out(self): mat = np.random.rand(3, 3) nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) resout = np.zeros(3) tgt = np.median(mat, axis=1) res = np.nanmedian(nan_mat, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) # 0-d output: resout = np.zeros(()) tgt = np.median(mat, axis=None) res = np.nanmedian(nan_mat, axis=None, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_small_large(self): # test the small and large code paths, current cutoff 400 elements for s in [5, 20, 51, 200, 1000]: d = np.random.randn(4, s) # Randomly set some elements to NaN: w = np.random.randint(0, d.size, size=d.size // 5) d.ravel()[w] = np.nan d[:,0] = 1. # ensure at least one good value # use normal median without nans to compare tgt = [] for x in d: nonan = np.compress(~np.isnan(x), x) tgt.append(np.median(nonan, overwrite_input=True)) assert_array_equal(np.nanmedian(d, axis=-1), tgt) def test_result_values(self): tgt = [np.median(d) for d in _rdat] res = np.nanmedian(_ndat, axis=1) assert_almost_equal(res, tgt) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for axis in [None, 0, 1]: with suppress_warnings() as sup: sup.record(RuntimeWarning) assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) if axis is None: assert_(len(sup.log) == 1) else: assert_(len(sup.log) == 3) # Check scalar assert_(np.isnan(np.nanmedian(np.nan))) if axis is None: assert_(len(sup.log) == 2) else: assert_(len(sup.log) == 4) def test_empty(self): mat = np.zeros((0, 3)) for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) assert_(len(w) == 0) def test_scalar(self): assert_(np.nanmedian(0.) == 0.) 
def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) assert_raises(np.AxisError, np.nanmedian, d, axis=-5) assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5)) assert_raises(np.AxisError, np.nanmedian, d, axis=4) assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4)) assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): with suppress_warnings() as sup: sup.filter(RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) assert_equal(np.nanmedian(a), inf) # minimum fill value check a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) assert_equal(np.nanmedian(a), inf) assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) assert_equal(np.nanmedian(a, axis=1), inf) # no mask path a = np.array([[inf, inf], [inf, inf]]) assert_equal(np.nanmedian(a, axis=1), inf) a = np.array([[inf, 7, -inf, -9], [-10, np.nan, np.nan, 5], [4, np.nan, np.nan, inf]], dtype=np.float32) if inf > 0: assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) assert_equal(np.nanmedian(a), 4.5) else: assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) assert_equal(np.nanmedian(a), -2.5) assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) for i in range(0, 10): for j in range(1, 10): a = np.array([([np.nan] * i) + ([inf] * j)] * 2) assert_equal(np.nanmedian(a), inf) assert_equal(np.nanmedian(a, axis=1), inf) assert_equal(np.nanmedian(a, axis=0), ([np.nan] * i) + [inf] * j) a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) assert_equal(np.nanmedian(a), -inf) assert_equal(np.nanmedian(a, axis=1), -inf) assert_equal(np.nanmedian(a, axis=0), ([np.nan] * i) + [-inf] * j) class TestNanFunctions_Percentile(TestCase): def test_mutation(self): # Check that passed array is not modified. ndat = _ndat.copy() np.nanpercentile(ndat, 30) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for axis in [None, 0, 1]: tgt = np.percentile(mat, 70, axis=axis, out=None, overwrite_input=False) res = np.nanpercentile(mat, 70, axis=axis, out=None, overwrite_input=False) assert_(res.ndim == tgt.ndim) d = np.ones((3, 5, 7, 11)) # Randomly set some elements to NaN: w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan with suppress_warnings() as sup: sup.filter(RuntimeWarning) res = np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) assert_equal(res.shape, (1, 1, 7, 11)) res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) assert_equal(res.shape, (1, 5, 7, 1)) res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) assert_equal(res.shape, (3, 1, 7, 11)) res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) assert_equal(res.shape, (1, 1, 7, 1)) def test_out(self): mat = np.random.rand(3, 3) nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) resout = np.zeros(3) tgt =
np.percentile(mat, 42, axis=1)
numpy.percentile
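# Editorial note, not part of the dataset row above: numpy.percentile along an
# axis, as in the completion. The matrix here is hypothetical.
import numpy as np

mat = np.arange(9.0).reshape(3, 3)
p = np.percentile(mat, 42, axis=1)  # one value per row, linear interpolation
assert np.allclose(p, [0.84, 3.84, 6.84])  # index 0.42 * (3 - 1) = 0.84 per row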
# pylint: disable=no-member
""" plot3d using existing visuals : LinePlotVisual """
import numpy as np
import sys

from vispy import app, visuals, scene

# build visuals
Plot3D = scene.visuals.create_visual_node(visuals.LinePlotVisual)

# build canvas
canvas = scene.SceneCanvas(keys='interactive', title='plot3d', show=True)

# Add a ViewBox to let the user zoom/rotate
view = canvas.central_widget.add_view()
view.camera = 'turntable'
view.camera.fov = 45
view.camera.distance = 6

###############################
from scipy.io import loadmat

track = np.load('/media/cat/256GB/donato/DON-003343/DON-003343_20210222/wheel/20210222/TRD-2P/wheel.npy')
print(track.shape)

track_sum = np.cumsum(track)
print(track_sum.shape)
print(track_sum)

#
track_pos = np.float32(-track_sum)

#
n_cycles_per_rotation = 500
track_circular = (track_pos % n_cycles_per_rotation) * 360 / n_cycles_per_rotation


def cart2pol(x, y):
    rho = np.sqrt(x**2 + y**2)
    phi = np.arctan2(y, x)
    return (rho, phi)


def pol2cart(rho, phi):
    x = rho *
np.cos(phi)
numpy.cos
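# Editorial note, not part of the dataset row above: numpy.cos inside a
# polar-to-Cartesian conversion, mirroring pol2cart. Angles are hypothetical.
import numpy as np

rho, phi = 2.0, np.array([0.0, np.pi / 2, np.pi])
x = rho * np.cos(phi)
y = rho * np.sin(phi)
assert np.allclose(x, [2.0, 0.0, -2.0])
assert np.allclose(np.hypot(x, y), rho)  # the radius is preserved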
#!/usr/bin/env python """ CMSC733 Spring 2020: Classical and Deep Learning Approaches for Geometric Computer Vision Homework 0: Alohomora: Phase 1 Starter Code Author(s): <NAME> (<EMAIL>) PhD Candidate in Computer Science, University of Maryland, College Park <NAME> (<EMAIL>) PhD Student in Computer Science, University of Maryland, College Park <NAME> (<EMAIL>) Graduate Student pursuing Masters in Robotics, University of Maryland, College Park """ # Code starts here: import numpy as np import cv2 import matplotlib.pyplot as plt from sklearn.cluster import KMeans import os import glob # Helper Functions. def rotateImage(image, angle): image_center = tuple(np.array(np.array(image).shape[1::-1]) / 2) rotation_matrix = cv2.getRotationMatrix2D(image_center, angle, 1.0) result = cv2.warpAffine(image, rotation_matrix, image.shape[1::-1], flags=cv2.INTER_LINEAR) return result def GaussianKernel(n, sigma): variance = sigma ** 2 size = int((n - 1) / 2) g = np.asarray([[(-y * np.exp(-1 * (x ** 2 + y ** 2) / (2 * variance))) for x in range(-size, size + 1)] for y in range(-size, size + 1)]) gauss = g / (2 * np.pi * variance * variance) return gauss def Gaussian1D(sigma, mu, x, order): x = np.array(x) - mu variance = sigma ** 2 gauss = (1 / np.sqrt(2 * np.pi * variance)) * (np.exp((-1 * x * x) / (2 * variance))) if order == 0: return gauss elif order == 1: gauss = - gauss * ((x) / (variance)) return gauss else: gauss = gauss * (((x * x) - variance) / (variance ** 2)) return gauss def Gaussian2D(k, sigma): size = int((k - 1) / 2) variance = sigma ** 2 s = np.asarray([[x ** 2 + y ** 2 for x in range(-size, size + 1)] for y in range(-size, size + 1)]) Gauss = (1 / np.sqrt(2 * np.pi * variance)) * np.exp(-s / (2 * variance)) return Gauss def LOG2D(k, sigma): size = int((k - 1) / 2) variance = sigma ** 2 s = np.asarray([[x ** 2 + y ** 2 for x in range(-size, size + 1)] for y in range(-size, size + 1)]) p = (1 / np.sqrt(2 * np.pi * variance)) * np.exp(-s / (2 * variance)) Laplacian = p * (s - variance) / (variance ** 2) return Laplacian def makefilter(scale, PhaseX, PhaseY, Points, k): gx = Gaussian1D(3 * scale, 0, Points[0, ...], PhaseX) gy = Gaussian1D(scale, 0, Points[1, ...], PhaseY) image = gx * gy image = np.reshape(image, (k, k)) return image def binary(image, bins): binary_img = image * 0 for r in range(0, image.shape[0]): for c in range(0, image.shape[1]): if image[r, c] == bins: binary_img[r, c] = 1 else: binary_img[r, c] = 0 return binary_img def compute_gradient(maps, numbins, mask_1, mask_2): maps = maps.astype(np.float64) gradient = np.zeros((maps.shape[0], maps.shape[1], 12)) for m in range(0, 12): chi_square = np.zeros((maps.shape)) for i in range(1, numbins): tmp = binary(maps, i) g_i = cv2.filter2D(tmp, -1, mask_1[m]) h_i = cv2.filter2D(tmp, -1, mask_2[m]) chi_square = chi_square + ((g_i - h_i) ** 2) / (g_i + h_i + 0.0001) gradient[:, :, m] = chi_square return gradient #Half-Disk. def half_disk(radius): halfd_ = np.zeros((radius * 2, radius * 2)) rad_ = radius ** 2; for i in range(0, radius): m = (i - radius) ** 2 for j in range(0, 2 * radius): if m + (j - radius) ** 2 < rad_: halfd_[i, j] = 1 return halfd_ #Filters. 
def Oriented_DoG(): sigma = [1, 3] orients = 16 orientation = np.arange(0, 360, 360 / orients) plt.figure(figsize=(25, 5)) val = [] for i in range(0, len(sigma)): kernel = (GaussianKernel(7, sigma[i])) for j in range(0, orients): filterg = rotateImage(kernel, orientation[j]) val.append(filterg) plt.suptitle("OrientedDoG") plt.subplot(len(sigma), orients, orients * (i) + j + 1) plt.axis('off') plt.imshow(val[orients * (i) + j], cmap='gray') plt.show() return val def LML(): k = 49 scaleX = np.sqrt(2) ** np.array([1, 2, 3]) Orientation = 6 Rotation_ = 12 Bar_ = len(scaleX) * Orientation Edge_ = len(scaleX) * Orientation nF = Bar_ + Edge_ + Rotation_ F = np.zeros([k, k, nF]) hK = (k - 1) / 2 x = [np.arange(-hK, hK + 1)] y = [np.arange(-hK, hK + 1)] [x, y] = np.meshgrid(x, y) orgPts = [x.flatten(), y.flatten()] orgPts = np.array(orgPts) count = 0 for scale in range(len(scaleX)): for orient in range(Orientation): angle = (np.pi * orient) / Orientation cosine_ = np.cos(angle) sin_ = np.sin(angle) rotPts = [[cosine_, -sin_], [sin_, cosine_]] rotPts = np.array(rotPts) rotPts = np.dot(rotPts, orgPts) # print(rotPts) F[:, :, count] = makefilter(scaleX[scale], 0, 1, rotPts, k) F[:, :, count + Edge_] = makefilter(scaleX[scale], 0, 2, rotPts, k) count = count + 1 count = Bar_ + Edge_ scales = np.sqrt(2) ** np.array([1, 2, 3, 4]) for i in range(len(scales)): F[:, :, count] = Gaussian2D(k, scales[i]) count = count + 1 for i in range(len(scales)): F[:, :, count] = LOG2D(k, scales[i]) count = count + 1 for i in range(len(scales)): F[:, :, count] = LOG2D(k, 3 * scales[i]) count = count + 1 plt.figure(figsize=(12, 8)) for i in range(0, 48): plt.subplot(6, 8, i + 1) plt.axis('off') plt.imshow(F[:, :, i], cmap='gray') plt.suptitle("LML") plt.show() return F def LMS(): k = 49 scaleX = np.sqrt(2) **
np.array([0, 1, 2])
numpy.array
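The half-disk masks defined in this row are consumed in opposing pairs by compute_gradient() (mask_1[m] against mask_2[m]) to form the chi-square gradient chi2 = sum_i (g_i - h_i)**2 / (g_i + h_i + eps). A minimal sketch of one way to build such pairs, assuming the row's half_disk() is in scope; the helper name half_disk_pairs and the orientation count are illustrative, not from the source.

import numpy as np
import cv2

def half_disk_pairs(radius, n_orients=12):
    # Rotate the base half-disk and its 180-degree complement through
    # n_orients orientations; each (mask_1[k], mask_2[k]) pair covers the
    # two sides of one oriented boundary.
    base = half_disk(radius)             # assumed: half_disk() from this row
    center = (radius, radius)
    size = base.shape[::-1]              # (width, height) for warpAffine
    mask_1, mask_2 = [], []
    for k in range(n_orients):
        angle = k * 180.0 / n_orients
        rot = cv2.getRotationMatrix2D(center, angle, 1.0)
        rot_opp = cv2.getRotationMatrix2D(center, angle + 180.0, 1.0)
        m = cv2.warpAffine(base, rot, size)
        m_opp = cv2.warpAffine(base, rot_opp, size)
        # Re-binarize after interpolation so filter2D sums stay histogram counts.
        mask_1.append((m > 0.5).astype(np.float64))
        mask_2.append((m_opp > 0.5).astype(np.float64))
    return mask_1, mask_2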
from torch.utils.data import Dataset import numpy as np import random import os import cv2 import torch from utils.ParamList import ParamList from preprocess import transforms from torch.utils.data import DataLoader from datasets.data.kitti.devkit_object import utils as kitti_utils from utils import data_utils class SeqDatasetReader(Dataset): def __init__(self, root, config, augment=None, is_training=True, split='training'): super(SeqDatasetReader, self).__init__() self._root = root self._config = config self._augment = augment self._classes = kitti_utils.name_2_label(config.DATASET.OBJs) self._relate_classes = kitti_utils.name_2_label(config.DATASET.RELATE_OBJs) self.is_training = is_training self._split = split self._aug_params = { 'hsv_h': config.DATASET.aug_hsv_h, 'hsv_s': config.DATASET.aug_hsv_s, 'hsv_v': config.DATASET.aug_hsv_v, 'degrees': config.DATASET.aug_degrees, 'translate': config.DATASET.aug_translate, 'scale': config.DATASET.aug_scale, 'shear': config.DATASET.aug_shear, } self._img_size = [config.INPUT_SIZE[0]] * 2 self._is_rect = config.IS_RECT self._norm_params = { 'mean_rgb': np.array(config.DATASET.MEAN, np.float32).reshape((1, 1, 3)), 'std_rgb': np.array(config.DATASET.STD, np.float32).reshape((1, 1, 3)) } self._Ks = {} self._Ts = {} self._load_seqs() self._transform = transforms.Compose([ transforms.Normalize(), transforms.ToTensor(), transforms.ToNCHW() ]) self._start_index = 0 def set_start_index(self, start_index): self._start_index = start_index @property def samples(self): return self._samples def __len__(self): return len(self._samples) - self._start_index def __getitem__(self, index): index += self._start_index assert index < len(self._samples), 'out of bounds index = %s !' % index img = self.load_image(index) target = ParamList((img.shape[1], img.shape[0])) K, T = self.load_calib_param(index) _labels = self.load_labels(index) N = 1 if len(_labels) > 0: cls, noise_mask, repeats = self._transform_obj_label(_labels[index][:, 0].copy()) _labels = np.repeat(_labels, repeats=repeats, axis=0) N = len(cls) target.add_field('class', cls) target.add_field('img_id',
np.zeros((N,), dtype=np.int64)
numpy.zeros
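In __getitem__ above, label rows are duplicated with np.repeat(_labels, repeats=repeats, axis=0), where repeats gives one count per label so the expanded rows stay aligned with the cls vector returned by _transform_obj_label. A tiny standalone illustration of that call pattern (values are made up):

import numpy as np

labels = np.array([[1, 0.50], [2, 0.70]])    # one row per object
repeats = np.array([1, 3])                   # keep row 0 once, triplicate row 1
expanded = np.repeat(labels, repeats=repeats, axis=0)
print(expanded.shape)                        # (4, 2): rows [1,.5],[2,.7],[2,.7],[2,.7]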
from .nmnet import NMNet from .nmnet import AverageMeter from .pt import utils import numpy as np import torch import torch.backends.cudnn as cudnn import torchvision.transforms as transforms from skimage.transform import resize as imresize import time import shutil import os import torchnet as tnt from torch import nn from torch.utils.data import DataLoader from torch.utils.data.dataloader import default_collate from torch.utils.data.distributed import DistributedSampler class PyNet(NMNet): def __init__(self): super(PyNet, self).__init__() self.distributed_train_loader = None def _get_num_parameters(self): return sum(p.numel() for p in self.model.parameters() if p.requires_grad) def _set_data_providers(self, train_data_provider, val_data_provider, train_batch_size, val_batch_size, num_workers=4, train_drop_last_batch=False, val_drop_last_batch=False, train_collate_fn=default_collate, val_collate_fn=default_collate): train_sampler = None if self.is_distributed: train_sampler = DistributedSampler(train_data_provider, num_replicas=self.world_size, rank=self.rank) if train_collate_fn is None: train_collate_fn = default_collate if val_collate_fn is None: val_collate_fn = default_collate if train_data_provider is not None: self.train_loader = DataLoader(train_data_provider, batch_size=train_batch_size, shuffle=(train_sampler is None), num_workers=num_workers, pin_memory=True, drop_last=train_drop_last_batch, sampler=train_sampler, collate_fn=train_collate_fn) if val_data_provider is not None: self.val_loader = DataLoader(val_data_provider, batch_size=val_batch_size, shuffle=False, drop_last=val_drop_last_batch, num_workers=num_workers, pin_memory=True, collate_fn=val_collate_fn) def _forward(self, input, **kwargs): if len(kwargs) > 0 and kwargs['extra_pars'] is not None: return self.model(input, **kwargs) return self.model(input) def _loss(self, input, target, **kwargs): if len(kwargs) > 0 and kwargs['extra_pars'] is not None: return self.criterion(input, target, **kwargs) return self.criterion(input, target) def forward_embedding(self, tensor): return self.forward(tensor) def to_gpu(self): device = torch.device("cuda" if torch.cuda.is_available() and self.use_gpu else "cpu") print(' ==> Moving the network model to {}'.format(device)) # If the model is trained with the distributed architecture, it has already been transferred to CUDA if not self.is_distributed: self.model.to(device) if self.criterion is not None: self.criterion.to(device) def get_preprocessed_tensor(self, numpy_image, mu, std, size): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize(mu, std)]) numpy_image = imresize(numpy_image, (size[0], size[1], size[2])).astype(dtype=numpy_image.dtype) im1 = transform(numpy_image) im1.resize_(1, size[2], size[0], size[1]) if self.use_gpu: im1 = im1.to("cuda") return im1 def extract_features(self, tensor, layer_names=None, linearize=False): if layer_names is None: layer_names = self.embedding_layers self.model.eval() self.set_extract_features_layers(layer_names) self.forward_embedding(tensor) return self.get_embeddings(linearize=linearize) def clear_embedding_listeners(self): # Remove all embedding hooks for ha in self.embedding_hook_handles: ha.remove() self.embedding_hook_handles = [] def set_embeddings_listeners(self, layer_names): # Clear any previous set embedding listener self.clear_embedding_listeners() # Reset embedding dicts self.embedding = [] embedding = dict() id_module_mapping = dict() # Loop over embedding layers for ii, layer in 
enumerate(layer_names): m = self.model n_dims = 1 # Handle parallel nets if isinstance(m, torch.nn.DataParallel): m = m.module n_dims = len(self.model.device_ids) # Get down to the selected layer for ll in layer.split('\\'): m = m._modules.get(ll) # Initialize the embedding tensors # id_module_mapping[id(m)] = layer # NB => this was the best way.. however if we are using a parallel net, then the id of the modules are different # on each replica.. thus embedding[id(m)] will fail at the hooks.. # To overcome this, we use the module name as string.. hopefully.. we use different names for the layers.. id_module_mapping[str(m)] = layer for ii in range(n_dims): key = '{}~~~{}'.format(layer,ii) embedding[key] = torch.FloatTensor() # Hook function that fills the embedding vectors def hook_fun(m, i, o): # tvec is a vector of BATCH x FEATS tvec = o.to("cpu").clone() ii = 0 if o.is_cuda: ii = o.get_device() layer = id_module_mapping[str(m)] key = '{}~~~{}'.format(layer,ii) # Concatenate on batch dimension embedding[key] = torch.cat((embedding[key], tvec), dim=0) # Save hook handles self.embedding_hook_handles.append(m.register_forward_hook(hook_fun)) # Link the embedding vectors to the class object self.embedding = embedding def get_embeddings(self, linearize=False): """ :type linearize: boolean """ embedding =
np.array([])
numpy.array
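set_embeddings_listeners() above relies on register_forward_hook to copy intermediate activations to the CPU during a forward pass. A self-contained sketch of that pattern on a toy model (the model and layer choice are illustrative):

import torch
from torch import nn

model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
captured = {}

def hook_fun(module, inputs, output):
    # Detach and move to CPU so the stored tensor outlives the forward pass,
    # mirroring the tvec = o.to("cpu").clone() step above.
    captured["embedding"] = output.detach().to("cpu").clone()

handle = model[1].register_forward_hook(hook_fun)  # listen on the ReLU
_ = model(torch.randn(3, 8))
print(captured["embedding"].shape)                 # torch.Size([3, 4])
handle.remove()                                    # cf. clear_embedding_listeners()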
# # """ Created on Mon Jun 29 17:30:58 2020 author: <NAME>; building on code from <NAME>/<NAME> Improvements by <NAME> June 2021 (clean-up and allowing verious rivers to be analysed in one Python program) See: https://github.com/Flood-Excess-Volume """ # Imports import matplotlib.pyplot as plt import pandas as pd from pandas import read_excel import bisect import numpy as np from array import * from pandas import datetime import calendar # Definitions/functions used def scale(x): return ((x-min(x))/(max(x)-min(x))) def unscaletime(x): return (((max(time) - min(time)) * x) + min(time)) def Q(x): # Discharge Q given the water level h = x iff rating curve is given; here for EA-type rating curves z = 0 while z < w[-1]: if x > lower_limits[z] and x <= upper_limits[z]: y = (c[z] * ((x - a[z]) ** b[z])) break elif x > upper_limits[z]: z = z + 1 else: y = (c[w[-1]] * ((x - a[w[-1]]) ** b[w[-1]])) return (y) # # Data import: river height data saved into a csv file # # The first column must have the heading 'Time', with time values converted into days (with the digits beyond the # decimal point representing what the hours and seconds elapsed are in terms of a fraction of a day, # and the second column must have the heading 'Height'. See also: # - https://github.com/Flood-Excess-Volume (Python: River Don) # - https://github.com/Rivers-Project-2018/How-to-do-FEV-Analysis/blob/master/README.md) # # * Input river data # * Chosen threshold height: ht # * overall error # # nriver = 4 # 2 # choice of river # nratingc = 1 # 1 # when there is a rating curve: value is 1; when flow data are given value is 0 # nriverflag = 0 # flags whether adjustment of array is needed; 0 means no; >0 means yes plus amount # nlength = length of array used when nriverflag = 1 ncheck = 1 # Superfluous test/check figures ; set ncheck=0 to remove (nriver,nratingc,nriverflag,nlength) = (10,1,0,0) (nriver,nratingc,nriverflag,nlength) = (13,1,10,1000) (nriver,nratingc,nriverflag,nlength) = (13,1,370,100) (nriver,nratingc,nriverflag,nlength) = (13,1,375,30) (nriver,nratingc,nriverflag,nlength) = (10,0,0,0) # (nriver,nratingc,nriverflag,nlength) = (13,1,0,0) # if nriver == 1: # River Don Rotherham/Tesco 24-10-2019 to 13-11-2019 ; (nriver,nractingc,nriverflag)=(1 0 0 0) Data = pd.read_csv('DonTesco201911.csv') # Suboptimal: not the source file from EA but editted file; needs to be source file # Full data file: RotherhamTesco_F0600_15minFlow_241019_131119.csv and RotherhamTesco_F0600_15minStage_241019_131119.csv ht = 1.9 # Threshold error = 0.08 # upper bounds chosen stitle = 'River Don at Rotherham/Tesco 2019' # elif nriver == 2: # River Aire test case 26/27-12-2015 ; (nriver,nractingc,nriverflag)=(2 1 0) Data = pd.read_csv('Aire15.csv') # Full data files: "Armley F1707 Stage 15min May 15 to Mar 16.csv" "Armley F1707 Flow 15min May 15 to Mar 16.csv" # 01/05/2015 00:00:00 to 30/03/2016 23:45:00 ht = 3.9 # Rating curve coeffecients, listed starting from lowest range of heights up until heighest range. # Note that Q(h) = c_j*(h-a_j)**b_j a = [0.156, 0.028, 0.153] b = [1.115, 1.462, 1.502] c = [30.96, 27.884, 30.127] # Upper and lower limits of ranges of river heights given for rating curve. 
lower_limits = [0.2, 0.685, 1.917] upper_limits = [0.685, 1.917, 4.17] error = [0.0542, 0.0344, 0.0528] error = 0.055 # upper bounds chosen stitle = 'River Aire at Leeds/Armley 2015' elif nriver == 3: # River Don Rotherham 2007 data set too large; needs to start later in current file ; (nriver,nractingc,nriverflag)=(3 0 1200) Data = pd.read_csv('RotherhamDon2007.csv') # Suboptimal: not a source file from EA but an editted file; needs to besource file # Full data source file: Rotherham_L0601_15minStage_241019_131119.csv 24/10/2019 00:00:00 to 13/11/2019 23:45:00 ht = 1.9 error = 0.08 # example nriverflag = 1200 stitle = 'River Don at Rotherham 2007' print('WARNING nriver=3: slicing done') elif nriver == 4: # River Don 2007 Sheffield Hadfields data set too large ; (nriver,nractingc,nriverflag)=(4 1 0) Data = pd.read_csv('SheffieldHadfields2007.csv') # Suboptimal: not a source file from EA but an editted file; needs to be the source file # Full data source file: SheffieldHadfields_F0605_15minStage_010107_140820.csv and SheffieldHadfields_F0605_15minFlow_010107_140820.csv # 01/01/2007 00:00:00 to 14/08/2020 23:45:00 ht = 2.9 # Note that Q(h) = c_j*(h-a_j)**b_j # different from River Aire case in excel file parameters.xlsx with parameters (a<->b coefficients interchanged) b = [1.3803, 1.2967, 1.1066] a = [0.3077, 0.34, -0.5767] c = [77.2829, 79.5656, 41.3367] # Upper and lower limits of ranges of river heights given for rating curve. lower_limits = [0.39, 0.927, 1.436] upper_limits = [0.927, 1.426, 3.58] error = 0.0799 # overall upper bounds stitle = 'River Don at Sheffield/Hadfields 2007' print('WARNING nriver=4: FEV_mac, FEV_min different; sort out role of QT_min, QT_max?') elif nriver == 5: # River Don 2019 Sheffield Hadfields data set too large ; (nriver,nractingc,nriverflag)=(5 1 0) Data = pd.read_csv('SheffieldHadfields2019.csv') # Suboptimal: not a source file from EA but editted file; needs to be source file # Full data source file: SheffieldHadfields_F0605_15minStage_010107_140820.csv and SheffieldHadfields_F0605_15minFlow_010107_140820.csv # 01/01/2007 00:00:00 to 14/08/2020 23:45:00 ht = 2.9 # Note that Q(h) = c_j*(h-a_j)**b_j # different from River Aire case in excel file parameters.xlsx with parameters (a<->b coefficients interchanged) b = [1.3803, 1.2967, 1.1066] a = [0.3077, 0.34, -0.5767] c = [77.2829, 79.5656, 41.3367] # Upper and lower limits of ranges of river heights given for rating curve. lower_limits = [0.39, 0.927, 1.436] upper_limits = [0.927, 1.426, 3.58] error = 0.0799 # overal upper bounds stitle = 'River Don at Sheffield/Hadfields 2019' print('WARNING nriver=5: FEV_mac, FEV_min different; sort out role of QT_min, QT_max?') elif nriver == 10: # River Ciliwung updated by Onno 01-017-2021 ; (nriver,nractingc,nriverflag)=(10 1 0 0) Nico Septianus' case # Full data source file: error see Septianus' thesis # Data = pd.read_excel(xlsx_file,sheetname='ciliwungdata.xlsx') # Data = pd.read_excel(r'ciliwungdata.xlsx', engine='openpyxl') # Works # Data = pd.read_excel('ciliwungdata.xlsx') # Data = pd.read_csv('ciliwungdata.csv') # Works Data = pd.read_csv('ciliwungdatashunyu.csv') # Works ht = 2.8 error = 0.05 stitle = 'River Ciliwung flood 2020 at Depok floodgate, Djakarta' # Different from River Ciliwung case in excel file parameters.xlsx with parameters (a<->b coefficients interchanged?) 
# Note that Q(h) = c_j*(h-a_j)**b_j c = [11.403] a = [-0.2] b = [1.715] # y = (c[w[-1]] * ((x - a[w[-1]]) ** b[w[-1]])) Qt = c[0]*(ht-a[0])**(b[0]) # Q(ht) should work TODO # Upper and lower limits of ranges of river heights given for rating curve. lower_limits = [0.0] upper_limits = [10] print(' nriver=10: Ciliwung river, cut-off not precise') elif nriver == 11: # New river ; (nriver,nractingc,nriverflag)=(11 0 0) # Full data source file: # Data = pd.read_csv('TBD.csv') # ht = TBD # error = TBD stitle = 'River TBD at TBD' print(' nriver=11: working.') elif nriver == 12: # River Ouse flood 2015; (nriver,nractingc,nriverflag)=(12 0 0 0) Antonia Feilden's 2018 project case 2015 York, 2000? York? # Full data source file: Data = pd.read_csv('skelton 2015.csv') ht = 6.17 error = 0.08 stitle = 'River Ouse flood 2015 at Skelton in York' print(' nriver=13: working but standard case does not apply given the hysteresis.') elif nriver == 13: # River Tamar in Devon; (nriver,nratingc,nriverflag,nlength) = (13,1,370,100) Onno's Matlab case 2018 # Full data source file: now hourly data Data = pd.read_csv('river-tamar-gulworthy-gunnislake.csv') ht = 2.95 error = 0.08 lower_limits = [0.1890, 0.3650] upper_limits = [0.3650, 3.9840] c = [30.4515824141758, 31.4420090976431] b = [3.89481846477192, 1.99812525109993] a = [-0.237667493077846, -0.00174326407201127] stitle = 'River Tamar flood 2013 at Gunnislake' print(' nriver=13: not yet working TBD.') # # Read in time and height data from "Data" file and make special plots # if nriver == 10: # Ciliwung river case is special time = Data['Day'] # Flow = Data['Flowrate'] # Fails to read in; why? height = Data['Riverheight'] elif nriver == 12: # Ouse river case is special time = Data['Day'] height = Data['Stage'] Flow = Data['Flow'] # Specific for case where flow data given as well instead of rating curve; reprogram elif nriver == 13: # Tamar river case is special datum = Data['date'] tijd = pd.to_datetime(datum) yr = tijd.dt.year time = tijd.dt.dayofyear # ts = pd.Timestamp(tijd) for jj in range(0,len(datum)): ts = pd.Timestamp(datum[jj]) aa = np.ceil(ts.to_julian_date()) aa = aa.astype(int) time[jj] = aa # aa = np.ceil(ts.to_julian_date('24-12-2013')) # aa = aa.astype(int) time = time-time[0] time = time-nriverflag-2 height = Data['avg_level'] heightmin = Data['min_level'] heightmax = Data['max_level'] else: # main case time = Data['Time'] height = Data['Height'] # # Establish flow/discharge data either from "Data" file or via rating curve # if nratingc == 1: w = [] for i in range(len(a)): w.append(i) qt = Q(ht) # For case with rating curve given and no given flow data qtmin = (1.0-error)*qt # Use an overall error upper bound qtmax = (1.0+error)*qt # Use an overall error upper bound print('hallo qt two:',qt) Flow = [] Flowmin = [] Flowmax = [] for i in height: Flow.append(Q(i)) Flowmin.append((1.0-error)*Q(i)) # Use an overall error upper bound Flowmax.append((1.0+error)*Q(i)) # Use an overall error upper bound scaledFlow = [] for i in Flow: scaledFlow.append((i - min(Flow)) / (max(Flow) - min(Flow))) if nriverflag > 0 and nlength == 0: # slice arrays to ignore first nriverflag number of data; clumsy slicing set up lent = len(time) time1 = np.zeros(lent-nriverflag) height1 = np.zeros(lent-nriverflag) Flow1 = np.zeros(lent-nriverflag) time1[0:lent-nriverflag] = time[nriverflag:lent] # height1[0:lent-nriverflag] = height[nriverflag:lent] Flow1[0:lent-nriverflag] = Flow[nriverflag:lent] del time, height, Flow (time, height, Flow) = (time1, height1, Flow1) del time1, 
height1, Flow1 print('HALLO111 HALLO111') # print('hallo WARNING: very clumsy way of slicing and reducing; please improve') elif nriverflag > 0 and nlength > 0: # slice arrays from nriverflag to nriverflag+nlength lent = len(time) time1 = np.zeros(nlength) height1 = np.zeros(nlength) Flow1 = np.zeros(nlength) time1[0:nlength] = time[nriverflag:nriverflag+nlength] # height1[0:nlength] = height[nriverflag:nriverflag+nlength] Flow1[0:nlength] = Flow[nriverflag:nriverflag+nlength] (time2, height2, Flow2) = (time, height, Flow) del time, height, Flow time = np.zeros(nlength) height = np.zeros(nlength) Flow = np.zeros(nlength) time = time1 height = height1 Flow = Flow1 if nriver == 13: heightmin1 = np.zeros(nlength) heightmax1 = np.zeros(nlength) heightmin1[0:nlength] = heightmin[nriverflag:nriverflag+nlength] heightmax1[0:nlength] = heightmax[nriverflag:nriverflag+nlength] del heightmin, heightmax heightmin = np.zeros(nlength) heightmax = np.zeros(nlength) heightmin = heightmin1 heightmax = heightmax1 # (time, height, Flow) = (time1, height1, Flow1) # del time1, height1, Flow1 # print('HALLO time same same',time,nriverflag,nlength) elif nratingc == 0: if nriver == 10: # special case Ciliwung river Flow = Data['Flowrate'] # Specific for case where flow data given as well instead of rating curve; TODO reprogram does not work else: Flow = Data['Flow'] # Specific for case where flow data given as well instead of rating curve; reprogram nend = len(time) if nriverflag > 0 and nlength == 0: # slice arrays to ignore first nriverflag number of data; clumsy slicing set up lent = len(time) time1 = np.zeros(lent-nriverflag) height1 = np.zeros(lent-nriverflag) Flow1 = np.zeros(lent-nriverflag) time1[0:lent-nriverflag] = time[nriverflag:lent] # height1[0:lent-nriverflag] = height[nriverflag:lent] Flow1[0:lent-nriverflag] = Flow[nriverflag:lent] del time, height, Flow (time, height, Flow) = (time1, height1, Flow1) del time1, height1, Flow1 # print('hallo WARNING: very clumsy way of slicing and reducing; please improve') elif nriverflag > 0 and nlength > 0: # slice arrays from nriverflag to nriverflag+nlength lent = len(time) time1 = np.zeros(nlength) height1 = np.zeros(nlength) Flow1 = np.zeros(nlength) time1[0:nlength] = time[nriverflag:nriverflag+nlength] # height1[0:nlength] = height[nriverflag:nriverflag+nlength] Flow1[0:nlength] = Flow[nriverflag:nriverflag+nlength] (time2, height2, Flow2) = (time, height, Flow) del time, height, Flow time = np.zeros(nlength) height = np.zeros(nlength) Flow = np.zeros(nlength) time = time1 height = height1 Flow = Flow1 if nriver == 13: # Tamar river heightmin1 = np.zeros(nlength) heightmax1 = np.zeros(nlength) heightmin1[0:nlength] = heightmin[nriverflag:nriverflag+nlength] heightmax1[0:nlength] = heightmax[nriverflag:nriverflag+nlength] del heightmin, heightmax heightmin = np.zeros(nlength) heightmax = np.zeros(nlength) heightmin = heightmin1 heightmax = heightmax1 #del time1, height1, Flow1 else: if nriver == 13: # Tamar river lent = len(time) heightmin1 = np.zeros(lent) heightmax1 = np.zeros(lent) heightmin1 = heightmin heightmax1 = heightmax del heightmin, heightmax heightmin = np.zeros(lent) heightmax = np.zeros(lent) heightmin = heightmin1 heightmax = heightmax1 print('nend, nendnew',nend,len(time)) # Superfluous print for checking scaledFlow = [] for i in Flow: scaledFlow.append((i - min(Flow)) / (max(Flow) - min(Flow))) # scaledFlow = scale(Flow) Flowmin = (1.0-error)*Flow Flowmax = (1.0+error)*Flow # Find qt, qtmin, qtmax nwin = 0 for i in range(1,len(height)): 
if height[i] > ht and height[i-1] < ht and nwin==0: qt = Flow[i-1]+(Flow[i]-Flow[i-1])*(ht-height[i-1])/(height[i]-height[i-1]) nwin = 1 if height[i] < ht and height[i-1] > ht and nwin==0: qt = Flow[i-1]+(Flow[i]-Flow[i-1])*(ht-height[i-1])/(height[i]-height[i-1]) nwin = 1 qtmin = (1.0-error)*qt qtmax = (1.0+error)*qt # # end of nratingc tell tale # print('hallo qt',qt) if ncheck == 111: # Superfluous test/check figures for sliced data ; set ncheck=0 at top to block plt.figure(101) plt.plot(height,Flow,'-') plt.xlabel('$h$ (m)',fontsize=16) plt.ylabel('$Q(h)$ (m$^3$/s)',fontsize=16) plt.figure(102) # Superfluous test/check figure plt.plot(time,Flow,'-') plt.xlabel('$t$ (days)',fontsize=16) plt.ylabel('$Q(t)$ (m$^3$/s)',fontsize=16) if nriver == 1: # River <NAME> ncheckk = 1 if ncheckk == 1: # Superfluous test/check figures for sliced data ; set ncheck=0 at top to block ; plots used plt.figure(111) plt.plot(time,height,'-k', linewidth=2) plt.ylabel('$h(t)$ [m]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) Qt = qt hmin = 0 hmax = 3.5 tmin = 0 tmax = 13 Qmin = 0 Qmax = 650 plt.plot([tmin,tmax],[ht,ht],'--k') plt.text(0.92*tmax, 1.05*ht, '$h_T$', size=18) plt.axis([tmin, tmax, hmin, hmax]) plt.title("a) Don, Rotherham Tesco") plt.figure(112) plt.plot(time,Flow,'-k', linewidth=2) plt.plot([tmin,tmax],[Qt,Qt],'--k') plt.text(0.92*tmax, 1.05*Qt, '$Q_T$', size=18) plt.ylabel('$Q(t)$ [m$^3/$s]',fontsize=16) plt.text(6.8, 280, '$FEV$', size=18, rotation=90) plt.xlabel('$t$ [day]',fontsize=16) plt.axis([tmin, tmax, Qmin, Qmax]) plt.title("a) Don, Rotherham Tesco") plt.figure(113) plt.plot(Flow,height,'-k', linewidth=2) plt.plot([0,Qt],[ht,ht],'--k') plt.text(0.1*Qmax, 1.05*ht, '$h_T$', size=18) plt.plot([Qt,Qt],[hmin,ht],'--k') plt.text(1.1*Qt, 0.05*ht, '$Q_T$', size=18) plt.plot([0,np.max(Flow)],[0,np.max(height)],'--b') plt.ylabel('$h$ [m]',fontsize=16) plt.xlabel('$Q$ [m$^3/$s]',fontsize=16) plt.axis([Qmin, Qmax, hmin, hmax]) plt.title("a) Don, Rotherham Tesco") elif nriver == 10: # Ciliwung river case is special ncheckk = 1 if ncheckk == 1: # Superfluous test/check figures for sliced data ; set ncheck=0 at top to block ; plots used plt.figure(111) plt.plot(time,height,'-k', linewidth=2) plt.ylabel('$h(t)$ [m]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) hmin = 0 hmax = 4.5 tmin = 1 tmax = 2 Qmin = 0 Qmax = 150 plt.plot([tmin,tmax],[ht,ht],'--k') plt.text(0.92*tmax, 1.05*ht, '$h_T$', size=18) plt.axis([tmin, tmax, hmin, hmax]) plt.title("b) Ciliwung, Depok Floodgate") plt.figure(112) plt.plot(time,c[0]*(height-a[0])**b[0],'-k', linewidth=2) plt.plot([tmin,tmax],[Qt,Qt],'--k') plt.text(0.92*tmax, 1.05*Qt, '$Q_T$', size=18) plt.text(1.5, 90, '$FEV$', size=18) plt.ylabel('$Q(t)$ [m$^3/$s]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) plt.axis([tmin, tmax, Qmin, Qmax]) plt.title("b) Ciliwung, Depok Floodgate") plt.figure(113) Nhh = 50 dhh = (hmax-a[0])/Nhh hh = np.zeros(Nhh+1) for jj in range (0,Nhh+1): hh[jj] = a[0]+jj*dhh print('jj',jj,dhh,hh[jj],c[0]*(hh[jj]-a[0])**(b[0])) # Q(hh(jj)) should work TODO plt.plot(c[0]*(hh-a[0])**(b[0]),hh,'-k', linewidth=2) # Q(hh) should work TODO plt.plot([0,Qt],[ht,ht],'--k') plt.text(0.1*Qmax, 1.05*ht, '$h_T$', size=18) plt.plot([Qt,Qt],[hmin,ht],'--k') plt.text(1.1*Qt, 0.05*ht, '$Q_T$', size=18) plt.plot([0,np.max(c[0]*(hh-a[0])**b[0])],[0,np.max(hh)],'--b') # Q(hh) should work TODO plt.ylabel('$h$ [m]',fontsize=16) plt.xlabel('$Q$ [m$^3/$s]',fontsize=16) plt.axis([Qmin, Qmax, hmin, hmax]) plt.title("b) Ciliwung, Depok Floodgate") elif nriver == 12: 
# Ouse river case is special ncheckk = 1 for jj in range (1,len(height)): # assuming there is only one pair of pass throughs if height[jj-1] < ht and height[jj] > ht: jup = jj if height[jj-1] > ht and height[jj] < ht: jdown = jj qtup = Flow[jup] # approximation qtdown = Flow[jdown] # approximation if ncheckk == 1: # Superfluous test/check figures for sliced data ; set ncheck=0 at top to block ; plots used plt.figure(111) plt.plot(time,height,'-k', linewidth=2) plt.ylabel('$h(t)$ [m]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) hmin = 0 hmax = 8 tmin = 0 tmax = 6 Qmin = 0 Qmax = 600 z = np.zeros(4) z = np.polyfit(height,Flow, 4) print('z',z) AA = np.zeros((4,4)) ff = np.zeros(4) d11 = np.inner(height,height) d21 = np.inner(height**2,height) d31 = np.inner(height**3,height) d41 = np.inner(height**4,height) d22 = np.inner(height**2,height**2) d32 = np.inner(height**3,height**2) d42 = np.inner(height**4,height**2) d33 = np.inner(height**3,height**3) d43 = np.inner(height**4,height**3) d44 = np.inner(height**4,height**4) ff[0] = np.inner(Flow,height) ff[1] = np.inner(Flow,height**2) ff[2] = np.inner(Flow,height**3) ff[3] = np.inner(Flow,height**4) AA = np.array([[d11,d21,d31,d41],[d21,d22,d32,d42],[d31,d32,d33,d43],[d41,d42,d43,d44]]) z = np.linalg.solve(AA, ff) Qt = z[0]*ht + z[1]*ht**2 + z[2]*ht**3 + z[3]*ht**4 print('z',z) a0 = -9539.05 a1 = 7056.26 a2 = -1909.17 a3 = 227.97 a4 = -9.97 Flowqh = 0.0*Flow Flowqh = z[0]*height + z[1]*height**2 + z[2]*height**3 + z[3]*height**4 plt.plot([tmin,tmax],[ht,ht],'--k') plt.text(0.92*tmax, 1.05*ht, '$h_T$', size=18) plt.axis([tmin, tmax, hmin, hmax]) plt.title("c) Ouse, Skelton, York") plt.figure(112) # plt.plot(time,c[0]*(height-a[0])**b[0],'-k', linewidth=2) plt.plot(time,Flow,'-k', linewidth=2) plt.plot(time,Flowqh,'--k', linewidth=2) plt.plot([tmin,tmax],[Qt,Qt],'--k', linewidth=1) plt.plot([tmin,tmax],[qtup,qtup],'--b', linewidth=2) plt.plot([time[jup],time[jdown]],[qtup,qtdown],'--r', linewidth=3) plt.plot([tmin,tmax],[qtdown,qtdown],'--b', linewidth=2) plt.ylabel('$Q(t)$ [m$^3/$s]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) plt.axis([tmin, tmax, Qmin, Qmax]) plt.title("c) Ouse, Skelton, York") plt.text(0.85*tmax, 1.02*Qt, '$Q_T$', size=18) plt.text(0.85*tmax, 1.04*qtup, '$Q_{T_{up}}$', size=18) plt.text(0.85*tmax, 0.92*qtdown, '$Q_{T_{down}}$', size=18) plt.figure(113) Nhh = 50 dhh = hmax/Nhh hh = np.zeros(Nhh+1) Qq = np.zeros(Nhh+1) for jj in range (0,Nhh+1): hh[jj] = jj*dhh Qq[jj] = z[0]*hh[jj] + z[1]*hh[jj]**2 + z[2]*hh[jj]**3 + z[3]*hh[jj]**4 plt.plot(Qq,hh,'--k', linewidth=2) plt.plot(Flow,height,'-k', linewidth=2) plt.plot([0,Qmax],[ht,ht],'--k') plt.plot([Qt,Qt],[hmin,ht],'--k') plt.plot([qtup,qtup],[hmin,ht],':b') plt.plot([qtdown,qtdown],[hmin,ht],':b') plt.plot([0,np.max(Flow)],[0,np.max(height)],'--b') plt.text(1.02*Qt, 0.05*ht, '$Q_T$', size=18) plt.text(1.02*qtup, 0.05*ht, '$Q_{T_{up}}$', size=18) plt.text(0.8*qtdown, 0.05*ht, '$Q_{T_{down}}$', size=18) plt.text(0.1*Qmax, 1.05*ht, '$h_T$', size=18) plt.ylabel('$h$ [m]',fontsize=16) plt.xlabel('$Q$ [m$^3/$s]',fontsize=16) plt.axis([Qmin, Qmax, hmin, hmax]) plt.title("c) Ouse, Skelton, York") elif nriver == 13: # Tamar river case is special ncheckk = 1 if ncheckk == 1: # Superfluous test/check figures for sliced data ; set ncheck=0 at top to block ; plots used hmin = 0 hmax = 3.5 tmin = 0 # nriverflag tmax = nlength # nriverflag+nlength Qmin = 0 Qmax = 350 plt.figure(111) print('lengte',len(Flow),nlength, time, height) # for jj in range (1,nlength): # p 
plt.plot([time[jj],time[jj]+1],[height[jj],height[jj]],'-k', linewidth=2) # p plt.plot([time[jj],time[jj]+1],[heightmin[jj],heightmin[jj]],'-k', linewidth=1) # p plt.plot([time[jj],time[jj]+1],[heightmax[jj],heightmax[jj]],'-k', linewidth=1) plt.plot(time,heightmin,':k', linewidth=1) plt.plot(time,height,'-k', linewidth=2) plt.plot(time,heightmax,':k', linewidth=1) plt.plot([tmin,tmax],[ht,ht],'--k') plt.text(0.92*tmax, 1.04*ht, '$h_T$', size=18) plt.axis([tmin, tmax, hmin, hmax]) plt.ylabel('$h(t)$ [m]',fontsize=16) # plt.xlabel('$t$ [day since 26-11-2012]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) plt.title("d) Tamar, Gunnislake") plt.figure(112) Flowmin = np.zeros(nlength) Flowmax = np.zeros(nlength) for jj in range (1,nlength): Flowmin[jj] = Q(heightmin[jj]) Flowmax[jj] = Q(heightmax[jj]) # pfor jj in range (1,nlength): # p plt.plot([time[jj],time[jj]+1],[Flow[jj],Flow[jj]],'-k', linewidth=2) plt.plot(time,Flow,'-k', linewidth=2) plt.plot(time,Flowmin,':k', linewidth=1) plt.plot([tmin,tmax],[qt,qt],'--k') plt.text(0.92*tmax, 1.04*qt, '$Q_T$', size=18) plt.plot(time,Flowmax,':k', linewidth=1) plt.axis([tmin, tmax, Qmin, Qmax]) plt.ylabel('$Q(t)$ [m$^3/$s]',fontsize=16) # plt.xlabel('$t$ [day since 26-11-2012]',fontsize=16) plt.xlabel('$t$ [day]',fontsize=16) plt.title("d) Tamar, Gunnislake") plt.figure(113) plt.plot(Flow2,height2,'.k', linewidth=2) plt.plot([qt,qt],[hmin,ht],'--k') plt.plot([0,Qmax],[ht,ht],'--k') plt.ylabel('$h$ [m]',fontsize=16) plt.xlabel('$Q$ [m$^3/$s]',fontsize=16) plt.text(1.02*qt, 0.05*ht, '$Q_T$', size=18) plt.text(0.1*Qmax, 1.05*ht, '$h_T$', size=18) plt.axis([Qmin, Qmax, hmin, hmax]) plt.title("d) Tamar, Gunnislake") # # Scalings: # time_increment=(time[1]-time[0])*24*3600 number_of_days=int((len(time)*(time[1]-time[0]))) scaledtime = scale(time) scaledheight = scale(height) error_height_up = [i * (1+error) for i in height] error_height_down = [i * (1-error) for i in height] scaledtime = scale(time) scaledheight = scale(height) # Figures fig, ax = plt.subplots() plt.rcParams["figure.figsize"] = [12, 12] plt.rcParams['axes.edgecolor'] = 'white' fig, ax = plt.subplots() ax.spines['left'].set_position(('zero')) ax.spines['bottom'].set_position(('zero')) ax.spines['left'].set_color('black') ax.spines['bottom'].set_color('black') scaledFlow_up = [i*(1+error) for i in scaledFlow] scaledFlow_down = [i*(1-error) for i in scaledFlow] negheight = -scaledheight negday = -(scaledtime) # To change the colour, change 'conrflowerblue' to another colour such as 'pink'. ax.plot(negheight, scaledFlow, 'black', linewidth=1) ax.plot([0, -1], [0, 1], 'blue', linestyle='--', marker='', linewidth=2) ax.plot(scaledtime, scaledFlow, 'black', linewidth=1) ax.plot(negheight, negday, 'black', linewidth=1) scaledht = (ht - min(height)) / (max(height) - min(height)) scaledqt = (qt - min(Flow)) / (max(Flow) - min(Flow)) QT = [] for i in scaledFlow: i = scaledqt QT.append(i) SF =
np.array(scaledFlow)
numpy.array
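The FEV script above evaluates an EA-style piecewise rating curve Q(h) = c_j * (h - a_j)**b_j at the threshold height ht and brackets it with an overall relative error. A worked check using the River Aire coefficients listed in this row (the plain loop below mirrors the rating-curve definition rather than the script's exact while-loop control flow):

a = [0.156, 0.028, 0.153]
b = [1.115, 1.462, 1.502]
c = [30.96, 27.884, 30.127]
lower_limits = [0.2, 0.685, 1.917]
upper_limits = [0.685, 1.917, 4.17]

def Q(h):
    for a_j, b_j, c_j, lo, hi in zip(a, b, c, lower_limits, upper_limits):
        if lo < h <= hi:
            return c_j * (h - a_j) ** b_j
    return c[-1] * (h - a[-1]) ** b[-1]   # above the top range, as in the script

ht, error = 3.9, 0.055
qt = Q(ht)                                # about 219 m^3/s
print(qt, (1.0 - error) * qt, (1.0 + error) * qt)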
""" Apply a (possibly multi-file) per-pixel spatial reference. Author: <NAME>, <EMAIL> """ import argparse import numpy as np import pandas as pd from osgeo import gdal from spectral.io import envi import logging import ray from typing import List import os import multiprocessing GLT_NODATA_VALUE=-9999 CRITERIA_NODATA_VALUE = -9999 def main(): parser = argparse.ArgumentParser(description='Integrate multiple GLTs with a mosaicing rule') parser.add_argument('rawspace_file', help='filename of rawspace source file or, in the case of a mosaic_glt, a text-file list of raw space files') parser.add_argument('glt_file') parser.add_argument('output_filename') parser.add_argument('--band_numbers', nargs='+', type=int, default=-1, help='list of 0-based band numbers, or -1 for all') parser.add_argument('--n_cores', type=int, default=-1) parser.add_argument('--log_file', type=str, default=None) parser.add_argument('--log_level', type=str, default='INFO') parser.add_argument('--run_with_missing_files', type=int, default=0, choices=[0, 1]) parser.add_argument('--ip_head', type=str) parser.add_argument('--redis_password', type=str) parser.add_argument('--one_based_glt', type=int, choices=[0, 1], default=0) parser.add_argument('--mosaic', type=int, choices=[0, 1], default=0) global GLT_NODATA_VALUE parser.add_argument('--glt_nodata_value', type=float, default=GLT_NODATA_VALUE) args = parser.parse_args() # Set up logging per arguments if args.log_file is None: logging.basicConfig(format='%(message)s', level=args.log_level) else: logging.basicConfig(format='%(message)s', level=args.log_level, filename=args.log_file) args.one_based_glt = args.one_based_glt == 1 args.run_with_missing_files = args.run_with_missing_files == 1 args.mosaic = args.mosaic == 1 GLT_NODATA_VALUE = args.glt_nodata_value # Log the current time logging.info('Starting apply_glt, arguments given as: {}'.format(args)) # Open the GLT dataset glt_dataset = gdal.Open(args.glt_file, gdal.GA_ReadOnly) glt = envi.open(args.glt_file + '.hdr').open_memmap(writeable=False, interleave='bip') if args.mosaic: rawspace_files = np.squeeze(np.array(pd.read_csv(args.rawspace_file, header=None))) else: rawspace_files = [args.rawspace_file] for _ind in range(len(rawspace_files)): first_file_dataset = gdal.Open(rawspace_files[_ind], gdal.GA_ReadOnly) if first_file_dataset is not None: break if args.band_numbers == -1: output_bands = np.arange(first_file_dataset.RasterCount) else: output_bands = np.array(args.band_numbers) # Build output dataset driver = gdal.GetDriverByName('ENVI') driver.Register() # TODO: careful about output datatypes / format outDataset = driver.Create(args.output_filename, glt.shape[1], glt.shape[0], len(output_bands), gdal.GDT_Float32, options=['INTERLEAVE=BIL']) outDataset.SetProjection(glt_dataset.GetProjection()) outDataset.SetGeoTransform(glt_dataset.GetGeoTransform()) del outDataset if args.n_cores == -1: args.n_cores = multiprocessing.cpu_count() rayargs = {'address': args.ip_head, '_redis_password': args.redis_password, 'local_mode': args.n_cores == 1} if args.n_cores < 40: rayargs['num_cpus'] = args.n_cores ray.init(**rayargs) jobs = [] for idx_y in range(glt.shape[0]): jobs.append(apply_mosaic_glt_line.remote(args.glt_file, args.output_filename, rawspace_files, output_bands, idx_y, args)) rreturn = [ray.get(jid) for jid in jobs] ray.shutdown() # Log final time and exit logging.info('GLT application complete, output available at: {}'.format(args.output_filename)) def _write_bil_chunk(dat: np.array, outfile: str, line: int, shape: 
tuple, dtype: str = 'float32') -> None: """ Write a chunk of data to a binary, BIL formatted data cube. Args: dat: data to write outfile: output file to write to line: line of the output file to write to shape: shape of the output file dtype: output data type Returns: None """ outfile = open(outfile, 'rb+') outfile.seek(line * shape[1] * shape[2] * np.dtype(dtype).itemsize) outfile.write(dat.astype(dtype).tobytes()) outfile.close() @ray.remote def apply_mosaic_glt_line(glt_filename: str, output_filename: str, rawspace_files: List, output_bands: np.array, line_index: int, args: List): """ Create one line of an output mosaic in mapspace Args: glt_filename: pre-built single or mosaic glt output_filename: output destination, assumed to location where a pre-initialized raster exists rawspace_files: list of rawspace input locations output_bands: array-like of bands to use from the rawspace file in the output line_index: line of the glt to process Returns: None """ logging.basicConfig(format='%(message)s', level=args.log_level, filename=args.log_file) glt_dataset = envi.open(glt_filename + '.hdr') glt = glt_dataset.open_memmap(writeable=False, interleave='bip') if line_index % 100 == 0: logging.info('Beginning application of line {}/{}'.format(line_index, glt.shape[0])) glt_line =
np.squeeze(glt[line_index, ...])
numpy.squeeze
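_write_bil_chunk() above seeks to line * shape[1] * shape[2] * itemsize because a BIL cube stored as (lines, bands, samples) lays each full line (all bands, all samples) contiguously. A quick sanity check of that offset arithmetic with hypothetical dimensions:

import numpy as np

shape = (1000, 5, 1200)                   # hypothetical (lines, bands, samples)
line = 42
offset = line * shape[1] * shape[2] * np.dtype('float32').itemsize
print(offset)                             # 42 * 5 * 1200 * 4 = 1008000 bytes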
#!/software/et_env/bin/python

import numpy as np
from sklearn.decomposition import PCA
import argparse


def orig_to_trans(pars):
    gamma=pars[6]
    logM_env=pars[8]
    beta=pars[7]
    incl=pars[14]
    pars[6]=np.log10(2.1-1*gamma)
    pars[8]=
np.log10(-1.5-1*logM_env)
numpy.log10
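orig_to_trans() above maps bounded physical parameters onto unbounded log10 scales before PCA; e.g. with gamma bounded above by 2.1, the argument of log10 stays positive. A tiny check with an illustrative value:

import numpy as np

gamma = 1.2
print(np.log10(2.1 - 1 * gamma))          # ~ -0.046, now free of the hard bound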
""" Dataset for clip model """ import os import logging import copy import torch from torch.utils.data import Dataset, DataLoader import numpy as np import time import math import random import h5py from tqdm import tqdm from easydict import EasyDict as edict import sys sys.path.append(".") from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package def l2_normalize_np_array(np_array, eps=1e-5): """np_array: np.ndarray, (*, D), where the last dim will be normalized""" return np_array / (np.linalg.norm(np_array, axis=-1, keepdims=True) + eps) def pad_sequences_1d(sequences, dtype=torch.long): """ Pad a single-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor, only allow the first dim has variable lengths Args: sequences: list(n-d tensor or list) dtype: torch.long for word indices / torch.float (float32) for other cases Returns: padded_seqs: ((n+1)-d tensor) padded with zeros mask: (2d tensor) of the same shape as the first two dims of padded_seqs, 1 indicate valid, 0 otherwise Examples: >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]] >>> pad_sequences_1d(test_data_list, dtype=torch.long) >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)] >>> pad_sequences_1d(test_data_3d, dtype=torch.float) """ if isinstance(sequences[0], list): sequences = [torch.tensor(s, dtype=dtype) for s in sequences] extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements lengths = [len(seq) for seq in sequences] padded_seqs = torch.zeros((len(sequences), max(lengths)) + extra_dims, dtype=dtype) mask = torch.zeros(len(sequences), max(lengths)).float() for idx, seq in enumerate(sequences): end = lengths[idx] padded_seqs[idx, :end] = seq mask[idx, :end] = 1 return padded_seqs, mask # , lengths def pad_image_text_alignment(sparse_alignment: list, max_img_feat: int, padding_value: int): """ sparse_alignment: max_img_feat: return: N_img x max_img_feat x max_alignment_length B x max_img_feat x max_alignment_length sparse_alignment: [ [ # image 1 [1,2,3], # whole image feature to the position of text feature embedding [4,5,6,7], # bbox1 feature to the position of text feature embedding [8,9,10], # bbox2 feature to the position of text feature embedding ], ... 
[ #image 2 [1,2,3,4], [3,4,5,6,7], [8,9,10], ], ] ## Giving a sparse alignment matrix, return a dense one with padding; """ max_alignment_length = max([len(region_i) for image_i in sparse_alignment for region_i in image_i]) bs = len(sparse_alignment) padded_image_text_alignment = \ np.ones((bs, max_img_feat, max_alignment_length), dtype=np.int32) * padding_value for i, img_ in enumerate(sparse_alignment): for j, feat_ in enumerate(img_): padded_image_text_alignment[i,j,:len(feat_)] = feat_ return padded_image_text_alignment def collate_concat_segment(batch,mask_ctx_text=False, mask_ctx_vis=False): batch_collect = edict() for key in batch[0].keys(): batch_collect[key] = [item[key] for item in batch] pad_ctx_text_feat, ctx_text_mask = pad_sequences_1d(batch_collect.ctx_text_feat, dtype=torch.float) pad_ctx_vis_feat, ctx_vis_mask = pad_sequences_1d(batch_collect.ctx_vis_feat, dtype=torch.float) if mask_ctx_text: # mask transcript of video pad_ctx_text_feat = torch.zeros_like(pad_ctx_text_feat) ctx_text_mask = torch.zeros_like(ctx_text_mask) if mask_ctx_vis: # mask vision of video pad_ctx_vis_feat = torch.zeros_like(pad_ctx_vis_feat) ctx_vis_mask = torch.zeros_like(ctx_vis_mask) return edict( seg_id=batch_collect.seg_id, seg_name=batch_collect.seg_name, vid_name=batch_collect.vid_name, pad_ctx_vis_feat=pad_ctx_vis_feat, pad_ctx_text_feat=pad_ctx_text_feat, ctx_text_mask = ctx_text_mask, ctx_vis_mask = ctx_vis_mask ) def collate_for_concat_fusion(batch, mask_query_text=False, mask_query_img=False, mask_ctx_text=False, mask_ctx_vis=False ): # collate function for concat text embedding and vis embedding batch_collect = edict() for key in batch[0].keys(): batch_collect[key] = [item[key] for item in batch] pad_query_text_feat, query_text_mask = pad_sequences_1d(batch_collect.query_text_feat, dtype=torch.float) pad_query_vis_feat, query_vis_mask = pad_sequences_1d(batch_collect.query_vis_feat, dtype=torch.float) max_len_img_feat = pad_query_vis_feat.shape[1] pad_img_text_alignment = torch.from_numpy( pad_image_text_alignment(batch_collect.image_2_text_alignment, max_len_img_feat, padding_value=-1) ) pad_ctx_text_feat, ctx_text_mask = pad_sequences_1d(batch_collect.ctx_text_feat, dtype=torch.float) pad_ctx_vis_feat, ctx_vis_mask = pad_sequences_1d(batch_collect.ctx_vis_feat, dtype=torch.float) if mask_query_text: pad_query_text_feat = torch.zeros_like(pad_query_text_feat) query_text_mask = torch.zeros_like(query_text_mask) if mask_query_img: pad_query_vis_feat = torch.zeros_like(pad_query_vis_feat) query_vis_mask = torch.zeros_like(query_vis_mask) if mask_ctx_text: pad_ctx_text_feat = torch.zeros_like(pad_ctx_text_feat) ctx_text_mask = torch.zeros_like(ctx_text_mask) if mask_ctx_vis: pad_ctx_vis_feat = torch.zeros_like(pad_ctx_vis_feat) ctx_vis_mask = torch.zeros_like(ctx_vis_mask) return edict( meta = batch_collect.meta, pad_query_text_feat = pad_query_text_feat, query_text_mask = query_text_mask, pad_query_vis_feat = pad_query_vis_feat, query_vis_mask = query_vis_mask, image_2_text_alignment = pad_img_text_alignment, pad_ctx_text_feat = pad_ctx_text_feat, pad_ctx_vis_feat = pad_ctx_vis_feat, ctx_text_mask = ctx_text_mask, ctx_vis_mask = ctx_vis_mask ) def collate_for_concat_MarginRanking(batch): # collate function for concat text embedding and vis embedding batch_collect = edict() for key in batch[0].keys(): batch_collect[key] = [item[key] for item in batch] pad_query_text_feat, query_text_mask = pad_sequences_1d(batch_collect.query_text_feat, dtype=torch.float) pad_query_vis_feat, query_vis_mask = 
pad_sequences_1d(batch_collect.query_vis_feat, dtype=torch.float) max_len_img_feat = pad_query_vis_feat.shape[1] pad_img_text_alignment = torch.from_numpy( pad_image_text_alignment(batch_collect.image_2_text_alignment, max_len_img_feat, padding_value=-1) ) pad_pos_ctx_text_feat, pos_ctx_text_mask = pad_sequences_1d(batch_collect.pos_ctx_text_feat, dtype=torch.float) pad_pos_ctx_vis_feat, pos_ctx_vis_mask = pad_sequences_1d(batch_collect.pos_ctx_vis_feat, dtype=torch.float) pad_intra_neg_ctx_text_feat, intra_neg_ctx_text_mask = pad_sequences_1d(batch_collect.intra_neg_ctx_text_feat, dtype=torch.float) pad_intra_neg_ctx_vis_feat, intra_neg_ctx_vis_mask = pad_sequences_1d(batch_collect.intra_neg_ctx_vis_feat, dtype=torch.float) pad_inter_neg_ctx_text_feat, inter_neg_ctx_text_mask = pad_sequences_1d(batch_collect.inter_neg_ctx_text_feat, dtype=torch.float) pad_inter_neg_ctx_vis_feat, inter_neg_ctx_vis_mask = pad_sequences_1d(batch_collect.inter_neg_ctx_vis_feat, dtype=torch.float) return edict( meta = batch_collect.meta, pad_query_text_feat = pad_query_text_feat, query_text_mask = query_text_mask, pad_query_vis_feat = pad_query_vis_feat, query_vis_mask = query_vis_mask, image_2_text_alignment = pad_img_text_alignment, pad_pos_ctx_text_feat = pad_pos_ctx_text_feat, pad_pos_ctx_vis_feat = pad_pos_ctx_vis_feat, pos_ctx_text_mask = pos_ctx_text_mask, pos_ctx_vis_mask = pos_ctx_vis_mask, pad_intra_neg_ctx_text_feat = pad_intra_neg_ctx_text_feat, pad_intra_neg_ctx_vis_feat = pad_intra_neg_ctx_vis_feat, intra_neg_ctx_text_mask = intra_neg_ctx_text_mask, intra_neg_ctx_vis_mask = intra_neg_ctx_vis_mask, pad_inter_neg_ctx_text_feat = pad_inter_neg_ctx_text_feat, pad_inter_neg_ctx_vis_feat = pad_inter_neg_ctx_vis_feat, inter_neg_ctx_text_mask = inter_neg_ctx_text_mask, inter_neg_ctx_vis_mask = inter_neg_ctx_vis_mask ) def collate_for_adding_fusion(batch): # collate function for adding text embedding and vis embedding for fusion batch_collect = edict() for key in batch[0].keys(): batch_collect[key] = [item[key] for item in batch] pad_query_text_feat, query_text_mask = pad_sequences_1d(batch_collect.query_text_feat, dtype=torch.float) pad_query_vis_feat = torch.zeros( pad_query_text_feat.size()[:2] + (batch_collect.query_vis_feat[0].shape[-1],), dtype=pad_query_text_feat.dtype ) query_vis_mask = copy.deepcopy(query_text_mask) query_token_type_ids = torch.ones( pad_query_text_feat.shape[:2], dtype=torch.long ) for bidx, (vis_feat, i2t) in enumerate(zip(batch_collect.query_vis_feat, batch_collect.image_2_text_alignment)): for idx,region2pos in enumerate(i2t): pad_query_vis_feat[bidx][region2pos] = vis_feat[idx] if idx==0: # 0 stands for the whole image query_token_type_ids[bidx][region2pos] = 0 pad_ctx_text_feat, ctx_text_mask = pad_sequences_1d(batch_collect.ctx_text_feat, dtype=torch.float) pad_ctx_vis_feat, ctx_vis_mask = pad_sequences_1d(batch_collect.ctx_vis_feat, dtype=torch.float) return edict( meta = batch_collect.meta, pad_query_text_feat = pad_query_text_feat, query_text_mask = query_text_mask, pad_query_vis_feat = pad_query_vis_feat, query_vis_mask = query_vis_mask, query_token_type_ids = query_token_type_ids, image_2_text_alignment = batch_collect.image_2_text_alignment, pad_ctx_text_feat = pad_ctx_text_feat, pad_ctx_vis_feat = pad_ctx_vis_feat, ctx_text_mask = ctx_text_mask, ctx_vis_mask = ctx_vis_mask ) """ Dummy dataset for debug: """ class DummyDataset(Dataset): """ Args: dset_name, str, ["AQVSR"] Return: a dict: { "meta": { "query_id": int, "text_query": str, # purely text query 
"original_query": str, "query_image_path": str, "vid_name": str, # youtube_id (11) "answer_segment_name" list[str] # name of segments: ["xtuiYd45q1W_segment1",...] "answer_segment_id": list[segment_id], # unique_segment_id "answer_segment_info": list[[st,ed], ... [st,ed]] # start_time, end_time of coresponding segment "sample_seg_id_for_training": int, # sample one segment for training ##### } "query_text_feat": torch.tensor, (L, D_q) # query feature "query_vis_feat": torch.tensor, (n_region, 2048) # image feature&region feature "image_2_text_alignment": list[list] # image to token alignment "ctx_vis_feat": torch.tensor, (n_clip_in_segment, dim_video) # video feature "ctx_text_feat": torch.tensor, (n_clip_in_segment, dim_sub) # sub feature } """ def __init__(self, dset_name="dummy", data_path="", query_bert_path_or_handler="", sub_feat_path_or_handler="", vid_feat_path_or_handler="", normalize_vfeat=True, normalize_tfeat=True): self.dset_name = dset_name self.data = np.arange(1000) # load data self.data = load_func("data_path") # query -> self.query_bert_path_or_handler = query_bert_path_or_handler # self.query_image_feat_path_or_handler self.sub_feat_path_or_handler = sub_feat_path_or_handler self.vid_fear_path_or_handler = vid_feat_path_or_handler # Should be loaded from h5py file # self.query_feat_h5 = load_func(...path_to) # self.sub_feat_h5 = load_func(...path_to) # self.vid_feat_h5 = load_func(...path_to) ##### Dummy dataset, for debug purpose self.query_min_len, self.query_max_len = 10, 20 self.n_clip_min, self.n_clip_max = 20, 80 self.n_region_min, self.n_region_max = 1, 3 def __len__(self): return len(self.data) def __getitem__(self, index): ######## # load from annotation ######## # item = self.data[index] # meta = edict( # query_id = item["query_id"], # text_query = item["text_query"], # vid_name = item["vid_name"], # answer_segment_name = item["answer_segment_name"], # answer_segment_id = item["answer_segment_info"], # ) # # query_text_feat = load_func(...), # query_vis_feat = load_func(...), # text_image_alignment = load_func(...), # ctx_vis_feat = load_func(...), # ctx_text_feat = load_func(...), # # return edict( # meta = meta, # ... # ) ### For debug purpose qid = np.random.randint(1000) # dummy: sample from one of the 1000 data text_query = "This is a sample from dummy dataset." # for debug vid_name = 'xcvFGRT_O3Q' answer_segment_name = ['xcvFGRT_O3Q_seg1','xcvFGRT_O3Q_seg3'] answer_segment_id = [10555,10557] answer_segment_info = [[80,125], [220,320]] meta = edict( query_id = qid, text_query = text_query, vid_name = vid_name, answer_segment_name = answer_segment_name, answer_segment_id = answer_segment_id, answer_segment_info = answer_segment_info ) query_len = np.random.randint(self.query_min_len, self.query_max_len+1) query_text_feat = torch.randn(query_len, 768) n_img_region = np.random.randint(self.n_region_min, self.n_region_max+1) query_vis_feat = torch.randn(n_img_region, 2048) img_2_text_alignment = np.split( np.arange(query_len), np.sort(np.random.choice(np.arange(1,query_len-1), n_img_region-1, replace=False)) ) n_clip =
np.random.randint(self.n_clip_min, self.n_clip_max+1)
numpy.random.randint
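The padding contract documented in pad_sequences_1d above (zero-pad the variable first dimension, return a 0/1 validity mask) can be cross-checked against torch's built-in pad_sequence; a self-contained demo:

import torch
from torch.nn.utils.rnn import pad_sequence

seqs = [torch.randn(2, 3, 4), torch.randn(4, 3, 4), torch.randn(1, 3, 4)]
padded = pad_sequence(seqs, batch_first=True)   # (3, 4, 3, 4), zero-padded
lengths = torch.tensor([s.shape[0] for s in seqs])
mask = (torch.arange(padded.shape[1])[None, :] < lengths[:, None]).float()
print(padded.shape)                             # torch.Size([3, 4, 3, 4])
print(mask)                                     # 1s up to each true length, 0s after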
import numpy as np from numpy import ma from scipy import sparse as sp # we don’t need this in requirements.txt, as it’s only needed for testing from pytest import mark from scanpy.data_structs.ann_data import AnnData, BoundStructArray, SMP_INDEX def test_creation(): AnnData(np.array([[1, 2], [3, 4]])) AnnData(ma.array([[1, 2], [3, 4]]), add={'mask': [0, 1, 1, 0]}) AnnData(sp.eye(2)) AnnData( np.array([[1, 2, 3], [4, 5, 6]]), dict(Smp=['A', 'B']), dict(Feat=['a', 'b', 'c'])) assert AnnData(np.array([1, 2])).X.shape == (2,) from pytest import raises raises(ValueError, AnnData, np.array([[1, 2], [3, 4]]), dict(TooLong=[1, 2, 3, 4])) def test_ddata(): ddata = dict( X=np.array([[1, 2, 3], [4, 5, 6]]), row_names=['A', 'B'], col_names=['a', 'b', 'c']) AnnData(ddata) def test_names(): adata = AnnData( np.array([[1, 2, 3], [4, 5, 6]]), dict(smp_names=['A', 'B']), dict(var_names=['a', 'b', 'c'])) assert adata.smp_names.tolist() == 'A B'.split() assert adata.var_names.tolist() == 'a b c'.split() adata = AnnData(np.array([[1, 2], [3, 4], [5, 6]]), var={'var_names': ['a', 'b']}) assert adata.var_names.tolist() == ['a', 'b'] def test_indices_dtypes(): adata = AnnData( np.array([[1, 2, 3], [4, 5, 6]]), dict(smp_names=['A', 'B']), dict(var_names=['a', 'b', 'c'])) # this assignment is nice adata.smp_names = ['d', 'b'] from pytest import raises with raises(ValueError): # this is not possible currently as we store # datatypes of fixed length adata.smp_names = ['hello', 'b'] # unicode not allowed for annotation adata.smp_names = ['ö', 'a'] def test_creation_from_vector(): adata = AnnData(np.array([1, 2, 3])) adata = AnnData(np.array([[1], [2], [3]])) def test_slicing(): adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]])) assert np.all(adata[:, 0].X == adata.X[:, 0]) assert adata[0, 0].X.tolist() == 1 assert adata[0, :].X.tolist() == [1, 2, 3] assert adata[:, 0].X.tolist() == [1, 4] assert adata[:, [0, 1]].X.tolist() == [[1, 2], [4, 5]] assert adata[:, np.array([0, 2])].X.tolist() == [[1, 3], [4, 6]] assert adata[:, np.array([False, True, True])].X.tolist() == [[2, 3], [5, 6]] assert adata[:, 1:3].X.tolist() == [[2, 3], [5, 6]] def test_slicing_strings(): adata = AnnData( np.array([[1, 2, 3], [4, 5, 6]]), dict(smp_names=['A', 'B']), dict(var_names=['a', 'b', 'c'])) assert adata['A', 'a'].X.tolist() == 1 assert adata['A', :].X.tolist() == [1, 2, 3] assert adata[:, 'a'].X.tolist() == [1, 4] assert adata[:, ['a', 'b']].X.tolist() == [[1, 2], [4, 5]] assert adata[:, np.array(['a', 'c'])].X.tolist() == [[1, 3], [4, 6]] assert adata[:, 'b':'c'].X.tolist() == [[2, 3], [5, 6]] from pytest import raises with raises(IndexError): _ = adata[:, 'X'] with raises(IndexError): _ = adata['X', :] with raises(IndexError): _ = adata['A':'X', :] with raises(IndexError): _ = adata[:, 'a':'X'] def test_get_subset_add(): adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]), dict(Smp=['A', 'B']), dict(Feat=['a', 'b', 'c'])) assert adata[0, 0].smp['Smp'].tolist() == ['A'] assert adata[0, 0].var['Feat'].tolist() == ['a'] def test_transpose(): adata = AnnData( np.array([[1, 2, 3], [4, 5, 6]]), dict(smp_names=['A', 'B']), dict(var_names=['a', 'b', 'c'])) adata1 = adata.T # make sure to not modify the original! 
assert adata.smp_names.tolist() == ['A', 'B'] assert adata.var_names.tolist() == ['a', 'b', 'c'] assert SMP_INDEX in adata1.smp.dtype.names assert adata1.smp_names.tolist() == ['a', 'b', 'c'] assert adata1.var_names.tolist() == ['A', 'B'] assert adata1.X.shape == adata.X.T.shape adata2 = adata.transpose() assert np.array_equal(adata1.X, adata2.X) assert np.array_equal(adata1.smp, adata2.smp) assert np.array_equal(adata1.var, adata2.var) assert adata1.smp._is_attr_of[1] == 'smp' == adata2.smp._is_attr_of[1] assert adata1.var._is_attr_of[1] == 'var' == adata2.var._is_attr_of[1] def test_append_add_col(): adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]])
numpy.array
if __name__ == '__main__': import sys import numpy numpy.set_printoptions(precision=3) num_bins = int(sys.argv[1]) inputs = [] for line in sys.stdin: inputs.append(float(line.strip())) histo, bin_edges = numpy.histogram(inputs, bins=num_bins, density=True) q25 =
numpy.percentile(inputs, 25)
numpy.percentile
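The script above passes density=True to numpy.histogram, so the returned values integrate to one over the bins rather than summing to the sample count; a quick check:

import numpy as np

data = np.random.randn(1000)
histo, edges = np.histogram(data, bins=10, density=True)
print(np.sum(histo * np.diff(edges)))     # ~1.0: bin areas integrate to one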
import matplotlib.pyplot as plt import xarray as xr import xarray.ufuncs as uf import numpy as np import warnings import gsw import traceback from .coast import Coast from .gridded import Gridded from scipy import interpolate from scipy.integrate import cumtrapz from sklearn.neighbors import BallTree from skimage import measure from .logging_util import warn, error # ============================================================================= # The contour module is a place for code related to contours only # ============================================================================= class Contour: # TODO Should these be module-level variables? GRAVITY = 9.8 # m s^-2 EARTH_ROT_RATE = 7.2921 * 10 ** (-5) # rad/s @staticmethod def get_contours(gridded: Coast, contour_depth: int): """ A method to obtain the continuous isbobath contours within a supplied gridded domain as a set of y indices and x indices for the model grid. Parameters ---------- gridded : Coast The gridded object containing the dataset with the 'bathymetry' variable contour_depth : int Depth of desired contours Returns ------- List of 2d ndarrays Each item of the list contains a different continuous isobath contour as a 2d ndarray of indicies, i.e. for each list item: contour[:,0] contains the y indices for the contour on the model grid contour[:,1] contains the x indices for the contour on the model grid """ contours = measure.find_contours(gridded.dataset.bathymetry.data, contour_depth) # The find_contours method returns indices that have been interpolated # between grid points so we must round and cast to integer contours = [np.round(contour).astype(int) for contour in contours] return contours, len(contours) @staticmethod def plot_contour(gridded: Coast, contour: np.ndarray): """ Quick plot method to plot a contour over a pcolormesh of the model bathymetry Parameters ---------- gridded : Coast The gridded object containing the dataset with the 'bathymetry' variable contour : 2d array contour[:,0] contains the y indices for the contour on the model grid contour[:,1] contains the x indices for the contour on the model grid i.e. contour = np.vstack((y_indices,x_indices)).T Returns ------- None """ fig, ax = plt.subplots() lat = gridded.dataset.latitude[xr.DataArray(contour[:, 0]), xr.DataArray(contour[:, 1])] lon = gridded.dataset.longitude[xr.DataArray(contour[:, 0]), xr.DataArray(contour[:, 1])] gridded.dataset.bathymetry.where(gridded.dataset.bathymetry > 0, np.nan).plot.pcolormesh( y="latitude", x="longitude", ax=ax ) ax.scatter(lon, lat, s=0.5, color="r") @staticmethod def get_contour_segment(gridded: Coast, contour: np.ndarray, start_coords: np.ndarray, end_coords: np.ndarray): """ Method that will take a contour from the list of contours generated by coast.Contour.get_contours() and trim it to start at supplied (lat,lon) coordinates and end at supplied (lat, lon) coordinates. 
Parameters ---------- gridded : Coast The gridded object containing the dataset with the 'bathymetry' variable contour : numpy.ndarray contour[:,0] contains the y indices for the contour on the model grid contour[:,1] contains the x indices for the contour on the model grid start_coords : numpy.ndarray 1d array containing [latitude,longitude] of the start point of the contour end_coords : numpy.ndarray 1d array containing [latitude,longitude] of the end point of the contour Returns ------- y_ind : numpy.ndarray y indices of the contour on the model grid x_ind : numpy.ndarray x indices of the contour on the model grid contour : numpy.ndarray For the convenience of plotting using coast.Contour.plot_contour() contour[:,0] = y_ind contour[:,1] = x_ind """ y_ind = contour[:, 0] x_ind = contour[:, 1] # Create tree of lat and lon on the pre-processed contour ball_tree = BallTree( np.deg2rad( list(zip(gridded.dataset.latitude.values[y_ind, x_ind], gridded.dataset.longitude.values[y_ind, x_ind])) ), metric="haversine", ) # Get start and end indices for contour and subset accordingly start_idx = ball_tree.query(np.deg2rad([start_coords]))[1][0][0] end_idx = ball_tree.query(np.deg2rad([end_coords]))[1][0][0] if start_idx > end_idx: y_ind = y_ind[end_idx : start_idx + 1] x_ind = x_ind[end_idx : start_idx + 1] else: y_ind = y_ind[start_idx : end_idx + 1] x_ind = x_ind[start_idx : end_idx + 1] # Ensure that the start point is closer to southern boundary of domain. # If start and end point have same latitude then ensure start point is # closer to the western boundary of the domain. if y_ind[0] > y_ind[-1]: y_ind = y_ind[::-1] x_ind = x_ind[::-1] elif y_ind[0] == y_ind[-1]: if x_ind[0] > x_ind[-1]: y_ind = y_ind[::-1] x_ind = x_ind[::-1] return y_ind, x_ind, np.vstack((y_ind, x_ind)).T def __init__(self, gridded: Coast, y_ind, x_ind, depth: int): """ Class defining a Contour type, which is a 3d dataset of points between a point A and a point B defining an isobath contour. The dataset has a time, depth and contour dimension. The contour dimension defines the points along the contour. The supplied model Data is subsetted in its entirety along these dimensions and calculations can be performed on this dataset. Parameters ---------- gridded : Coast gridded object containing the model dataset. y_ind : numpy.ndarray 1d array of y indices defining the contour on the model grid x_ind : numpy.ndarray 1d array of x indices defining the contour on the model grid depth : int Depth of contour isobath """ try: if y_ind[0] > y_ind[-1]: raise ValueError( "Start point of the contour " "must be closer than the end point of the " "contour to the southern boundary of the model " "domain." ) elif y_ind[0] == y_ind[-1]: if x_ind[0] > x_ind[-1]: raise ValueError( "Start and end points of the contour " "have the same latitudes, the start point must " "be the closer of the two points to the western " "boundary of the model domain." 
) self.depth = depth self.y_ind, self.x_ind = self.process_contour(gridded.dataset, y_ind, x_ind) self.len = len(self.y_ind) self.filename_domain = gridded.filename_domain da_y_ind = xr.DataArray(self.y_ind, dims=["r_dim"]) da_x_ind = xr.DataArray(self.x_ind, dims=["r_dim"]) self.data_contour = gridded.dataset.isel(y_dim=da_y_ind, x_dim=da_x_ind) except ValueError: error(traceback.format_exc()) def process_contour(self, dataset: xr.Dataset, y_ind, x_ind): """Redefine contour so that each point on the contour defined by y_ind and x_ind is separated from its neighbours by a single index change in y or x, but not both. Example: convert y_ind = [10,11], x_ind = [1,2] to y_ind = [10,10], x_ind = [1,2] or y_ind = [10,11], x_ind = [1,1] Parameters ---------- dataset : xarray.Dataset xarray Dataset from supplied gridded object y_ind : numpy.ndarray 1d array of y indices defining the contour on the model grid x_ind : numpy.ndarray 1d array of x indices defining the contour on the model grid Returns ------- y_ind : numpy.ndarray processed y indices of the contour on the model grid x_ind : numpy.ndarray processed x indices of the contour on the model grid """ try: y_ind = np.asarray(y_ind) x_ind = np.asarray(x_ind) # When replacing diagonal segments in the contour, pick the path that is # closest to the contour isobath depth option1 = np.fabs(dataset.bathymetry[xr.DataArray(y_ind + 1), xr.DataArray(x_ind)] - self.depth) option0 = np.fabs(dataset.bathymetry[xr.DataArray(y_ind), xr.DataArray(x_ind + 1)] - self.depth) add_new_y_point = xr.where(option1 <= option0, 1, 0) spacing = np.abs(np.diff(y_ind)) + np.abs(np.diff(x_ind)) if spacing.max() > 2: raise ValueError( "The contour is not continuous. The contour must be defined on " "adjacent grid points." ) spacing[spacing != 2] = 0 double_spacing = np.nonzero(spacing)[0] for space_index in double_spacing[::-1]: if add_new_y_point[space_index]: y_ind = np.insert(y_ind, space_index + 1, y_ind[space_index + 1]) x_ind = np.insert(x_ind, space_index + 1, x_ind[space_index]) else: y_ind = np.insert(y_ind, space_index + 1, y_ind[space_index]) x_ind = np.insert(x_ind, space_index + 1, x_ind[space_index + 1]) # Remove any repeated points caused by the rounding of the indices non_repeated_idx = np.nonzero(np.abs(np.diff(y_ind)) + np.abs(np.diff(x_ind))) y_ind = np.concatenate((y_ind[non_repeated_idx], [y_ind[-1]])) x_ind = np.concatenate((x_ind[non_repeated_idx], [x_ind[-1]])) return y_ind, x_ind except ValueError: error(traceback.format_exc()) @staticmethod def gen_z_levels(max_depth): """Generates pre-defined 1d vertical depth coordinates, i.e. horizontal z-level vertical coordinates up to a supplied maximum depth, 'max_depth'""" max_depth = max_depth + 650 z_levels_0_50 = np.arange(0, 55, 5) z_levels_60_290 = np.arange(60, 300, 10) z_levels_300_600 = np.arange(300, 650, 50) z_levels_650_ = np.arange(650, max_depth + 150, 150) z_levels = np.concatenate((z_levels_0_50, z_levels_60_290, z_levels_300_600, z_levels_650_)) z_levels = z_levels[z_levels <= max_depth] return z_levels class ContourF(Contour): """ Class defining a Contour type on the f-grid, which is a 3d dataset of points between a point A and a point B defining an isobath contour. The dataset has a time, depth and contour dimension. The contour dimension defines the points along the contour.
The supplied model f-grid data is subsetted in its entirety along these dimensions within Contour_f.data_contour of type xarray.Dataset Parameters ---------- gridded_f : Coast f-grid gridded object containing the model dataset. y_ind : numpy.ndarray 1d array of y indices defining the contour on the model grid x_ind : numpy.ndarray 1d array of x indices defining the contour on the model grid depth : int Depth of contour isobath """ def __init__(self, gridded_f: Coast, y_ind, x_ind, depth): super().__init__(gridded_f, y_ind, x_ind, depth) self.data_cross_flow = xr.Dataset() def calc_cross_contour_flow(self, gridded_u: Coast, gridded_v: Coast): """ Method that will calculate the flow across the contour and store this data within Contour_f.data_cross_flow, which is an xarray.Dataset. Specifically, Contour_f.normal_velocities are the velocities across the contour (time, depth, position along contour) in m/s Contour_f.depth_integrated_normal_transport are the depth integrated volume transports across the contour (time, position along contour) in Sv If the time dependent cell thicknesses (e3) on the u and v grids are present in the gridded_u and gridded_v datasets they will be used, if they are not then the initial cell thicknesses (e3_0) will be used. Parameters ---------- gridded_u : Coast The gridded object containing the model data on the u-grid. gridded_v : Coast The gridded object containing the model data on the v-grid. Returns ------- None. """ # compute transports flag; set to false if suitable e3 not found compute_transports = True # subset the u and v datasets da_y_ind = xr.DataArray(self.y_ind, dims=["r_dim"]) da_x_ind = xr.DataArray(self.x_ind, dims=["r_dim"]) u_ds = gridded_u.dataset.isel(y_dim=da_y_ind, x_dim=da_x_ind) v_ds = gridded_v.dataset.isel(y_dim=da_y_ind, x_dim=da_x_ind) # use time-varying e3 if present, if not default to e3_0 if "e3" not in u_ds.data_vars: if "e3_0" not in u_ds.data_vars: warn("e3 not found, transports will not be calculated") compute_transports = False else: u_ds["e3"] = u_ds.e3_0.broadcast_like(u_ds.u_velocity) if "e3" not in v_ds.data_vars: if "e3_0" not in v_ds.data_vars: warn("e3 not found, transports will not be calculated") compute_transports = False else: v_ds["e3"] = v_ds.e3_0.broadcast_like(v_ds.v_velocity) # If time dimension is missing it can throw off the indexing so expand dims if "t_dim" not in u_ds.dims: u_ds["u_velocity"] = u_ds.u_velocity.expand_dims("t_dim", axis=0) if compute_transports: u_ds["e3"] = u_ds.e3.expand_dims("t_dim", axis=0) if "t_dim" not in v_ds.dims: v_ds["v_velocity"] = v_ds.v_velocity.expand_dims("t_dim", axis=0) if compute_transports: v_ds["e3"] = v_ds.e3.expand_dims("t_dim", axis=0) dr_n = np.where(np.diff(self.y_ind) > 0, np.arange(0, u_ds.r_dim.size - 1), np.nan) dr_n = dr_n[~np.isnan(dr_n)].astype(int) dr_s = np.where(np.diff(self.y_ind) < 0, np.arange(0, u_ds.r_dim.size - 1), np.nan) dr_s = dr_s[~np.isnan(dr_s)].astype(int) dr_e = np.where(np.diff(self.x_ind) > 0, np.arange(0, v_ds.r_dim.size - 1), np.nan) dr_e = dr_e[~np.isnan(dr_e)].astype(int) dr_w = np.where(np.diff(self.x_ind) < 0, np.arange(0, v_ds.r_dim.size - 1), np.nan) dr_w = dr_w[~np.isnan(dr_w)].astype(int) # Note that subsetting the dataset first, instead of subsetting each array # separately as we do here, is neater but significantly slower.
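# dr_n / dr_s hold the along-contour indices where the contour steps north /
# south (the normal flow there is +u / -u), while dr_e / dr_w mark east / west
# steps (normal flow -v / +v). The "+ 1" offsets used below pick the velocity
# point on the correct side of each f-point.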
tmp_velocities = xr.full_like(u_ds.u_velocity, np.nan) tmp_velocities[:, :, dr_n] = u_ds.u_velocity.data[:, :, dr_n + 1] tmp_velocities[:, :, dr_s] = -u_ds.u_velocity.data[:, :, dr_s] tmp_velocities[:, :, dr_e] = -v_ds.v_velocity.data[:, :, dr_e + 1] tmp_velocities[:, :, dr_w] = v_ds.v_velocity.data[:, :, dr_w] self.data_cross_flow["normal_velocities"] = tmp_velocities[:, :, :-1] self.data_cross_flow["normal_velocities"].attrs = {"units": "m/s", "standard_name": "contour-normal velocities"} # Store the length of the contour segment (calling it e4) on the cross-contour velocity grid tmp_e4 = xr.full_like(u_ds.e1, np.nan) tmp_e4[dr_n] = u_ds.e2.data[dr_n + 1] tmp_e4[dr_s] = u_ds.e2.data[dr_s] tmp_e4[dr_e] = v_ds.e1.data[dr_e + 1] tmp_e4[dr_w] = v_ds.e1.data[dr_w] self.data_cross_flow["e4"] = tmp_e4[:-1] self.data_cross_flow["e4"].attrs = { "units": "m", "standard_name": "length of contour segment at the cross-contour velocity grid points", } if compute_transports: # calculate the transport across the contour tmp_transport = xr.full_like(u_ds.u_velocity, np.nan) tmp_transport[:, :, dr_n] = ( u_ds.u_velocity.data[:, :, dr_n + 1] * u_ds.e2.data[dr_n + 1] * u_ds.e3.data[:, :, dr_n + 1] ) tmp_transport[:, :, dr_s] = ( -u_ds.u_velocity.data[:, :, dr_s] * u_ds.e2.data[dr_s] * u_ds.e3.data[:, :, dr_s] ) tmp_transport[:, :, dr_e] = ( -v_ds.v_velocity.data[:, :, dr_e + 1] * v_ds.e1.data[dr_e + 1] * v_ds.e3.data[:, :, dr_e + 1] ) tmp_transport[:, :, dr_w] = v_ds.v_velocity.data[:, :, dr_w] * v_ds.e1.data[dr_w] * v_ds.e3.data[:, :, dr_w] self.data_cross_flow["normal_transport"] = tmp_transport[:, :, :-1] self.data_cross_flow["normal_transport"].attrs = { "units": "m^3/s", "standard_name": "contour-normal volume transport", } # calculate the depth integrated transport across the contour self.data_cross_flow["depth_integrated_normal_transport"] = ( self.data_cross_flow.normal_transport.sum(dim="z_dim") / 1000000.0 ) self.data_cross_flow["depth_integrated_normal_transport"].attrs = { "units": "Sv", "standard_name": "contour-normal depth integrated volume transport", } self._update_cross_flow_vars("depth_0", u_ds.depth_0, v_ds.depth_0, dr_n, dr_s, dr_e, dr_w, 1) self._update_cross_flow_latlon(u_ds, v_ds, dr_n, dr_s, dr_e, dr_w) self._update_cross_flow_vars("bathymetry", u_ds.bathymetry, v_ds.bathymetry, dr_n, dr_s, dr_e, dr_w, 0) self._update_cross_flow_vars("e1", u_ds.e1, v_ds.e1, dr_n, dr_s, dr_e, dr_w, 0) self._update_cross_flow_vars("e2", u_ds.e2, v_ds.e2, dr_n, dr_s, dr_e, dr_w, 0) if compute_transports: self._update_cross_flow_vars("e3", u_ds.e3, v_ds.e3, dr_n, dr_s, dr_e, dr_w, 2) self.data_cross_flow["depth_0"].attrs = { "standard_name": "Depth at time zero on the contour-normal velocity grid points" } self.data_cross_flow = self.data_cross_flow.squeeze() def _update_cross_flow_vars(self, var, u_var, v_var, dr_n, dr_s, dr_e, dr_w, pos): """This method will pull variable data at specific points along the contour from the u and v grid datasets and put them into the self.data_cross_flow dataset""" tmp_var = xr.full_like(u_var, np.nan) if pos == 0: tmp_var[dr_n] = u_var.data[dr_n + 1] tmp_var[dr_s] = u_var.data[dr_s] tmp_var[dr_e] = v_var.data[dr_e + 1] tmp_var[dr_w] = v_var.data[dr_w] self.data_cross_flow[var] = tmp_var[:-1] elif pos == 1: tmp_var[:, dr_n] = u_var.data[:, dr_n + 1] tmp_var[:, dr_s] = u_var.data[:, dr_s] tmp_var[:, dr_e] = v_var.data[:, dr_e + 1] tmp_var[:, dr_w] = v_var.data[:, dr_w] self.data_cross_flow[var] = tmp_var[:, :-1] elif pos == 2: tmp_var[:, :, dr_n] = u_var.data[:, :, dr_n
+ 1] tmp_var[:, :, dr_s] = u_var.data[:, :, dr_s] tmp_var[:, :, dr_e] = v_var.data[:, :, dr_e + 1] tmp_var[:, :, dr_w] = v_var.data[:, :, dr_w] self.data_cross_flow[var] = tmp_var[:, :, :-1] def _update_cross_flow_latlon(self, ds_u, ds_v, dr_n, dr_s, dr_e, dr_w): """This method will pull the latitude and longitude data at specific points along the contour from the u and v grid datasets and put them into the self.data_cross_flow dataset""" for var in ["longitude", "latitude"]: tmp_var = xr.full_like(ds_u[var], np.nan) tmp_var[dr_n] = ds_u[var].data[dr_n + 1] tmp_var[dr_s] = ds_u[var].data[dr_s] tmp_var[dr_e] = ds_v[var].data[dr_e + 1] tmp_var[dr_w] = ds_v[var].data[dr_w] tmp_var.attrs = {"standard_name": var.capitalize() + " at the contour-normal velocity grid points"} self.data_cross_flow.assign_coords({var: tmp_var[:-1]}) @staticmethod def _pressure_gradient_fpoint2(ds_t, ds_t_j1, ds_t_i1, ds_t_j1i1, r_ind, velocity_component): """ Calculates the hydrostatic and surface pressure gradients at a set of f-points along the contour, i.e. at a set of specific values of r_dim (but for all time and depth). The caller must supply four datasets that contain the variables which define the hydrostatic and surface pressure at all vertical z_levels and all time on the t-points around the contour, i.e. for a set of f-points on the contour, each defined at (j+1/2, i+1/2), we want t-points at (j,i), (j+1,i), (j,i+1), (j+1,i+1), corresponding to ds_t, ds_t_j1, ds_t_i1, ds_t_j1i1, respectively. ds_t, ds_t_j1, ds_t_i1, ds_t_j1i1 will have dimensions in time and depth. The velocity_component defines whether u or v is normal to the contour for the segments of the contour. A segment of contour is defined as being r_dim to r_dim+1 where r_dim is the along contour dimension. Returns ------- hpg_f : DataArray with dimensions in time and depth and along contour hydrostatic pressure gradient at a set of f-points along the contour for all time and depth spg_f : DataArray with dimensions in time and along contour surface pressure gradient at a set of f-points along the contour """ if velocity_component == "u": # required scale factors for derivative and averaging e2v = 0.5 * (ds_t_j1.e2.data[r_ind] + ds_t.e2.data[r_ind]) e2v_i1 = 0.5 * (ds_t_j1i1.e2.data[r_ind] + ds_t_i1.e2.data[r_ind]) e1v = 0.5 * (ds_t_j1.e1.data[r_ind] + ds_t.e1.data[r_ind]) e1v_i1 = 0.5 * (ds_t_j1i1.e1.data[r_ind] + ds_t_i1.e1.data[r_ind]) e1f = 0.5 * (e1v + e1v_i1) # calculate gradients at v-points either side of f-point hpg = (ds_t_j1.pressure_h_zlevels.data[:, :, r_ind] - ds_t.pressure_h_zlevels.data[:, :, r_ind]) / e2v hpg_i1 = ( ds_t_j1i1.pressure_h_zlevels.data[:, :, r_ind] - ds_t_i1.pressure_h_zlevels.data[:, :, r_ind] ) / e2v_i1 # average onto f-point hpg_f = 0.5 * ((e1v * hpg) + (e1v_i1 * hpg_i1)) / e1f # as above spg = (ds_t_j1.pressure_s.data[:, r_ind] - ds_t.pressure_s.data[:, r_ind]) / e2v spg_i1 = (ds_t_j1i1.pressure_s.data[:, r_ind] - ds_t_i1.pressure_s.data[:, r_ind]) / e2v_i1 spg_f = 0.5 * ((e1v * spg) + (e1v_i1 * spg_i1)) / e1f elif velocity_component == "v":
# required scale factors for derivative and averaging e1u = 0.5 * (ds_t_i1.e1.data[r_ind] + ds_t.e1.data[r_ind]) e1u_j1 = 0.5 * (ds_t_j1i1.e1.data[r_ind] + ds_t_j1.e1.data[r_ind]) e2u = 0.5 * (ds_t_i1.e2.data[r_ind] + ds_t.e2.data[r_ind]) e2u_j1 = 0.5 * (ds_t_j1i1.e2.data[r_ind] + ds_t_j1.e2.data[r_ind]) e2f = 0.5 * (e2u + e2u_j1) # calculate gradients at u-points either side of f-point hpg = (ds_t_i1.pressure_h_zlevels.data[:, :, r_ind] - ds_t.pressure_h_zlevels.data[:, :, r_ind]) / e1u hpg_j1 = ( ds_t_j1i1.pressure_h_zlevels.data[:, :, r_ind] - ds_t_j1.pressure_h_zlevels.data[:, :, r_ind] ) / e1u_j1 # average onto f-point hpg_f = 0.5 * ((e2u * hpg) + (e2u_j1 * hpg_j1)) / e2f # as above spg = (ds_t_i1.pressure_s.data[:, r_ind] - ds_t.pressure_s.data[:, r_ind]) / e1u spg_j1 = (ds_t_j1i1.pressure_s.data[:, r_ind] - ds_t_j1.pressure_s.data[:, r_ind]) / e1u_j1 spg_f = 0.5 * ((e2u * spg) + (e2u_j1 * spg_j1)) / e2f else: raise ValueError("velocity_component must be 'u' or 'v'.") return hpg_f, spg_f def calc_geostrophic_flow( self, gridded_t: Coast, ref_density=None, config_u="config/example_nemo_grid_u.json", config_v="config/example_nemo_grid_v.json", ): """ This method will calculate the geostrophic velocity and volume transport (due to the geostrophic current) across the contour. Four variables are added to the Contour.data_cross_flow dataset: 1. normal_velocity_hpg (t_dim, depth_z_levels, r_dim) This is the velocity due to the hydrostatic pressure gradient 2. normal_velocity_spg (t_dim, r_dim) This is the velocity due to the surface pressure gradient 3. transport_across_AB_hpg (t_dim, r_dim) This is the volume transport due to the hydrostatic pressure gradient 4. transport_across_AB_spg (t_dim, r_dim) This is the volume transport due to the surface pressure gradient This implementation works by regridding vertically onto horizontal z_levels in order to perform the horizontal gradients. Currently s_level depths are assumed fixed at their initial depths, i.e. at time zero. Requirements: The gridded t-grid dataset, gridded_t, must contain the sea surface height, Practical Salinity and the Potential Temperature variables. The depth_0 field must also be supplied. The GSW package is used to calculate the Absolute Pressure, Absolute Salinity and Conservative Temperature. Parameters ---------- gridded_t : Coast This is the gridded model data on the t-grid for the entire domain. ref_density : float, optional reference density value. If not supplied a mean in time, depth and along the contour will be used as the mean reference value. config_u : file configuration file for u-grid object config_v : file configuration file for v-grid object Returns ------- None. """ # If there is no time dimension, add one then remove at end. This is so # indexing can assume a time dimension exists gridded_t_local = gridded_t.copy() if "t_dim" not in gridded_t_local.dataset.dims: gridded_t_local.dataset = gridded_t_local.dataset.expand_dims(dim={"t_dim": 1}, axis=0) # We need to calculate the pressure at four t-points to get an # average onto the pressure gradient at the f-points, which will then # be averaged onto the normal velocity points.
Here we subset the gridded_t # data around the contour so we have these four t-grid points at each # point along the contour cont_t = ContourT(gridded_t_local, self.y_ind, self.x_ind, self.depth) # j,i cont_t_j1 = ContourT(gridded_t_local, self.y_ind + 1, self.x_ind, self.depth) # j+1,i cont_t_i1 = ContourT(gridded_t_local, self.y_ind, self.x_ind + 1, self.depth) # j,i+1 cont_t_j1i1 = ContourT(gridded_t_local, self.y_ind + 1, self.x_ind + 1, self.depth) # j+1,i+1 bath_max = np.max( [ cont_t.data_contour.bathymetry.max().item(), cont_t_j1.data_contour.bathymetry.max().item(), cont_t_i1.data_contour.bathymetry.max().item(), cont_t_j1i1.data_contour.bathymetry.max().item(), ] ) z_levels = self.gen_z_levels(bath_max) cont_t.construct_pressure(ref_density, z_levels, extrapolate=True) cont_t_j1.construct_pressure(ref_density, z_levels, extrapolate=True) cont_t_i1.construct_pressure(ref_density, z_levels, extrapolate=True) cont_t_j1i1.construct_pressure(ref_density, z_levels, extrapolate=True) # Remove the mean hydrostatic pressure on each z_level from the hydrostatic pressure. # This helps to reduce the noise when taking the horizontal gradients of hydrostatic pressure. # Also catch and ignore nan-slice warning with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) pressure_h_zlevel_mean = xr.concat( ( cont_t.data_contour.pressure_h_zlevels, cont_t_j1.data_contour.pressure_h_zlevels, cont_t_i1.data_contour.pressure_h_zlevels, cont_t_j1i1.data_contour.pressure_h_zlevels, ), dim="concat_dim", ).mean(dim=("concat_dim", "r_dim", "t_dim"), skipna=True) if ref_density is None: ref_density = ( xr.concat( ( cont_t.data_contour.density_zlevels, cont_t_j1.data_contour.density_zlevels, cont_t_i1.data_contour.density_zlevels, cont_t_j1i1.data_contour.density_zlevels, ), dim="concat_dim", ) .mean(dim=("concat_dim", "r_dim", "t_dim", "depth_z_levels"), skipna=True) .item() ) cont_t.data_contour["pressure_h_zlevels"] = cont_t.data_contour.pressure_h_zlevels - pressure_h_zlevel_mean cont_t_j1.data_contour["pressure_h_zlevels"] = ( cont_t_j1.data_contour.pressure_h_zlevels - pressure_h_zlevel_mean ) cont_t_i1.data_contour["pressure_h_zlevels"] = ( cont_t_i1.data_contour.pressure_h_zlevels - pressure_h_zlevel_mean ) cont_t_j1i1.data_contour["pressure_h_zlevels"] = ( cont_t_j1i1.data_contour.pressure_h_zlevels - pressure_h_zlevel_mean ) # Coriolis parameter f = 2 * self.EARTH_ROT_RATE * np.sin(np.deg2rad(self.data_contour.latitude)) # Find the indices where the derivative of the contour in the north, south, east and west # directions are positive. dr_n = np.where(np.diff(self.y_ind) > 0, np.arange(0, self.data_contour.r_dim.size - 1), np.nan) dr_s = np.where(np.diff(self.y_ind) < 0, np.arange(0, self.data_contour.r_dim.size - 1), np.nan) dr_e = np.where(np.diff(self.x_ind) > 0, np.arange(0, self.data_contour.r_dim.size - 1), np.nan) dr_w = np.where(np.diff(self.x_ind) < 0, np.arange(0, self.data_contour.r_dim.size - 1), np.nan) dr_list = [ dr_n[~np.isnan(dr_n)].astype(int), dr_s[~np.isnan(dr_s)].astype(int), dr_e[~
np.isnan(dr_e)].astype(int), dr_w[~np.isnan(dr_w)].astype(int), ]
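# =============================================================================
# A minimal usage sketch for the classes above (hypothetical file names, a
# hypothetical 200 m isobath, and an assumed Gridded constructor; only the
# contour API itself is defined in this module):
#
# gridded_f = Gridded(fn_data="nemo_f.nc", fn_domain="domain_cfg.nc",
#                     config="config/example_nemo_grid_f.json")
# contours, n_contours = Contour.get_contours(gridded_f, 200)
# y_ind, x_ind, contour = Contour.get_contour_segment(
#     gridded_f, contours[0], np.array([50.0, -10.0]), np.array([60.0, -5.0])
# )
# Contour.plot_contour(gridded_f, contour)
# cont_f = ContourF(gridded_f, y_ind, x_ind, 200)
# cont_f.calc_cross_contour_flow(gridded_u, gridded_v)
# =============================================================================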
"""Tests for the array pading functions. """ from __future__ import division, absolute_import, print_function from distutils.version import LooseVersion import numpy as np from numpy.testing import (assert_array_equal, assert_raises, assert_allclose, TestCase) try: from numpy.testing.decorators import skipif except AttributeError: from numpy.testing.dec import skipif from _skipclass import skipclassif from skfuzzy.image import pad @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestConditionalShortcuts(TestCase): def test_zero_padding_shortcuts(self): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(0, 0) for axis in test.shape] modes = ['constant', 'edge', 'linear_ramp', 'maximum', 'mean', 'median', 'minimum', 'reflect', 'symmetric', 'wrap', ] for mode in modes: assert_array_equal(test, pad(test, pad_amt, mode=mode)) def test_shallow_statistic_range(self): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(1, 1) for axis in test.shape] modes = ['maximum', 'mean', 'median', 'minimum', ] for mode in modes: assert_array_equal(pad(test, pad_amt, mode='edge'), pad(test, pad_amt, mode=mode, stat_length=1)) def test_clip_statistic_range(self): test = np.arange(30).reshape(5, 6) pad_amt = [(3, 3) for axis in test.shape] modes = ['maximum', 'mean', 'median', 'minimum', ] for mode in modes: assert_array_equal(pad(test, pad_amt, mode=mode), pad(test, pad_amt, mode=mode, stat_length=30)) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestStatistic(TestCase): def test_check_mean_stat_length(self): a = np.arange(100).astype('f') a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) b = np.array( [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. 
]) assert_array_equal(a, b) def test_check_maximum_1(self): a = np.arange(100) a = pad(a, (25, 20), 'maximum') b = np.array( [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] ) assert_array_equal(a, b) def test_check_maximum_2(self): a = np.arange(100) + 1 a = pad(a, (25, 20), 'maximum') b = np.array( [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] ) assert_array_equal(a, b) def test_check_maximum_stat_length(self): a = np.arange(100) + 1 a = pad(a, (25, 20), 'maximum', stat_length=10) b = np.array( [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] ) assert_array_equal(a, b) def test_check_minimum_1(self): a = np.arange(100) a = pad(a, (25, 20), 'minimum') b = np.array( [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) assert_array_equal(a, b) def test_check_minimum_2(self): a = np.arange(100) + 2 a = pad(a, (25, 20), 'minimum') b = np.array( [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] ) assert_array_equal(a, b) def 
test_check_minimum_stat_length(self): a = np.arange(100) + 1 a = pad(a, (25, 20), 'minimum', stat_length=10) b = np.array( [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] ) assert_array_equal(a, b) def test_check_median(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'median') b = np.array( [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] ) assert_array_equal(a, b) def test_check_median_01(self): a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) a = pad(a, 1, 'median') b = np.array( [[4, 4, 5, 4, 4], [3, 3, 1, 4, 3], [5, 4, 5, 9, 5], [8, 9, 8, 2, 8], [4, 4, 5, 4, 4]] ) assert_array_equal(a, b) def test_check_median_02(self): a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) a = pad(a.T, 1, 'median').T b = np.array( [[5, 4, 5, 4, 5], [3, 3, 1, 4, 3], [5, 4, 5, 9, 5], [8, 9, 8, 2, 8], [5, 4, 5, 4, 5]] ) assert_array_equal(a, b) def test_check_median_stat_length(self): a = np.arange(100).astype('f') a[1] = 2. a[97] = 96. a = pad(a, (25, 20), 'median', stat_length=(3, 5)) b = np.array( [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] 
) assert_array_equal(a, b) def test_check_mean_shape_one(self): a = [[4, 5, 6]] a = pad(a, (5, 7), 'mean', stat_length=2) b = np.array( [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] ) assert_array_equal(a, b) def test_check_mean_2(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'mean') b = np.array( [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] ) assert_array_equal(a, b) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestConstant(TestCase): def test_check_constant(self): a = np.arange(100) a = pad(a, (25, 20), 'constant', constant_values=(10, 20)) b = np.array( [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] ) assert_array_equal(a, b) def test_check_constant_zeros(self): a = np.arange(100) a = pad(a, (25, 20), 'constant') b = np.array( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) assert_array_equal(a, b) def test_check_constant_float(self): # If input array is int, but constant_values are float, the dtype of # the array to be padded is kept arr = np.arange(30).reshape(5, 6) test = pad(arr, (1, 2), mode='constant', constant_values=1.1) expected = np.array( [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], [ 1, 0, 1, 2, 3, 4, 5, 1, 1], [ 1, 6, 7, 8, 9, 10, 11, 1, 1], [ 1, 12, 13, 14, 15, 16, 17, 
1, 1], [ 1, 18, 19, 20, 21, 22, 23, 1, 1], [ 1, 24, 25, 26, 27, 28, 29, 1, 1], [ 1, 1, 1, 1, 1, 1, 1, 1, 1], [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) assert_allclose(test, expected) def test_check_constant_float2(self): # If input array is float, and constant_values are float, the dtype of # the array to be padded is kept - here retaining the float constants arr = np.arange(30).reshape(5, 6) arr_float = arr.astype(np.float64) test = pad(arr_float, ((1, 2), (1, 2)), mode='constant', constant_values=1.1) expected = np.array( [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] ) assert_allclose(test, expected) def test_check_constant_float3(self): a = np.arange(100, dtype=float) a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) b = np.array( [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] ) assert_allclose(a, b) def test_check_constant_odd_pad_amount(self): arr = np.arange(30).reshape(5, 6) test = pad(arr, ((1,), (2,)), mode='constant', constant_values=3) expected = np.array( [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] ) assert_allclose(test, expected) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestLinearRamp(TestCase): def test_check_simple(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) b = np.array( [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, 0.80, 0.64, 0.48, 0.32, 0.16, 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] 
) assert_allclose(a, b, rtol=1e-5, atol=1e-5) def test_check_2d(self): arr = np.arange(20).reshape(4, 5).astype(np.float64) test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) expected = np.array( [[0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], [0., 0., 0., 1., 2., 3., 4., 2., 0.], [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], [0., 5., 10., 11., 12., 13., 14., 7., 0.], [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) assert_allclose(test, expected) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestReflect(TestCase): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'reflect') b = np.array( [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] ) assert_array_equal(a, b) def test_check_odd_method(self): a = np.arange(100) a = pad(a, (25, 20), 'reflect', reflect_type='odd') b = np.array( [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] ) assert_array_equal(a, b) def test_check_large_pad(self): a = [[4, 5, 6], [6, 7, 8]] a = pad(a, (5, 7), 'reflect') b = np.array( [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] ) assert_array_equal(a, b) def test_check_shape(self): a = [[4, 5, 6]] a = pad(a, (5, 7), 'reflect') b = np.array( [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 
5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] ) assert_array_equal(a, b) def test_check_01(self): a = pad([1, 2, 3], 2, 'reflect') b = np.array([3, 2, 1, 2, 3, 2, 1]) assert_array_equal(a, b) def test_check_02(self): a = pad([1, 2, 3], 3, 'reflect') b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) assert_array_equal(a, b) def test_check_03(self): a = pad([1, 2, 3], 4, 'reflect') b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) assert_array_equal(a, b) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestSymmetric(TestCase): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'symmetric') b = np.array( [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] ) assert_array_equal(a, b) def test_check_odd_method(self): a = np.arange(100) a = pad(a, (25, 20), 'symmetric', reflect_type='odd') b = np.array( [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] ) assert_array_equal(a, b) def test_check_large_pad(self): a = [[4, 5, 6], [6, 7, 8]] a = pad(a, (5, 7), 'symmetric') b = np.array( [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] ) assert_array_equal(a, b) def test_check_large_pad_odd(self): a = [[4, 5, 6], [6, 7, 8]] a = pad(a, (5, 7), 'symmetric', reflect_type='odd') b = np.array( [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], [ 7, 8, 8, 9, 10, 
10, 11, 12, 12, 13, 14, 14, 15, 16, 16], [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] ) assert_array_equal(a, b) def test_check_shape(self): a = [[4, 5, 6]] a = pad(a, (5, 7), 'symmetric') b = np.array( [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] ) assert_array_equal(a, b) def test_check_01(self): a = pad([1, 2, 3], 2, 'symmetric') b = np.array([2, 1, 1, 2, 3, 3, 2]) assert_array_equal(a, b) def test_check_02(self): a = pad([1, 2, 3], 3, 'symmetric') b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) assert_array_equal(a, b) def test_check_03(self): a = pad([1, 2, 3], 6, 'symmetric') b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) assert_array_equal(a, b) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestWrap(TestCase): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'wrap') b = np.array( [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] ) assert_array_equal(a, b) def test_check_large_pad(self): a = np.arange(12) a = np.reshape(a, (3, 4)) a = pad(a, (10, 12), 'wrap') b = np.array( [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 
10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11]] ) assert_array_equal(a, b) def test_check_01(self): a = pad([1, 2, 3], 3, 'wrap') b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) assert_array_equal(a, b) def test_check_02(self): a = pad([1, 2, 3], 4, 'wrap') b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) assert_array_equal(a, b) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestStatLen(TestCase): def test_check_simple(self): a = np.arange(30) a = np.reshape(a, (6, 5)) a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) b = np.array( [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] ) assert_array_equal(a, b) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestEdge(TestCase): def test_check_simple(self): a = np.arange(12) a = np.reshape(a, (4, 3)) a = pad(a, ((2, 3), (3, 2)), 'edge') b = np.array( [[0, 0, 0, 0, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 2, 2], [3, 3, 3, 3, 4, 5, 5, 5], [6, 6, 6, 6, 7, 8, 8, 8], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11]] ) assert_array_equal(a, b) @skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"), "NumPy's inbuilt pad used instead") class TestZeroPadWidth(TestCase): def test_zero_pad_width(self): arr =
np.arange(30)
        # Assumed completion (the original snippet is truncated at this point):
        # a pad width of zero must return the input unchanged, whatever form
        # the zero width takes.
        arr = np.reshape(arr, (6, 5))
        for pad_width in (0, (0, 0), ((0, 0), (0, 0))):
            assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
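# A concrete illustration of the pad modes exercised above; the expected
# outputs are taken directly from the test cases in this file:
#
# >>> pad(np.array([1, 2, 3]), 2, 'reflect')
# array([3, 2, 1, 2, 3, 2, 1])
# >>> pad(np.array([1, 2, 3]), 2, 'symmetric')
# array([2, 1, 1, 2, 3, 3, 2])
# >>> pad(np.array([1, 2, 3]), 3, 'wrap')
# array([1, 2, 3, 1, 2, 3, 1, 2, 3])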
""" Useful small tools for spectroscopic analysis """ # Python 3 vs. 2 from __future__ import print_function, division # Standard Library Modules import numpy as np from scipy.interpolate import UnivariateSpline, interp1d from scipy import constants # Third Party Modules # Your Own Modules ##################### # Code starts here # ##################### # Create a center wavelength grid with constant width in log (i.e., velocity) space: # Input is in Angstrom, output is log10(lambda/Angstrom) def get_loglam(minwave=448., maxwave=10402., dloglam=1.E-4, pivot=None): """Return a central wavelength grid uniform in velocity space """ if minwave>maxwave: raise ValueError("Your maximum wavelength is smaller than the minimum wavelength.") if not pivot: return np.arange(np.log10(minwave),
np.log10(maxwave), dloglam)
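    # Sketch of the pivot branch (assumed behaviour; the original snippet ends
    # at the return above): anchor the log-uniform grid so that one grid point
    # falls exactly on log10(pivot).
    loglam_pivot = np.log10(pivot)
    n_lower = np.ceil((loglam_pivot - np.log10(minwave)) / dloglam)
    n_upper = np.floor((np.log10(maxwave) - loglam_pivot) / dloglam)
    return loglam_pivot + np.arange(-n_lower, n_upper + 1) * dloglam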
import random from scipy.spatial.distance import squareform, pdist import numpy as np from sklearn import linear_model import gibbs from sklearn.neighbors import NearestNeighbors from vae_ld.learning_dynamics import logger class TwoNN: """ Implementation of the ID estimator TwoNN from [1] [1] Estimating the intrinsic dimension of datasets by a minimal neighborhood information <NAME>, <NAME>, <NAME>, and <NAME>, 2017 """ def __init__(self): self._to_keep = 0.9 self._knn = NearestNeighbors(n_neighbors=3) @property def to_keep(self): return self._to_keep @to_keep.setter def to_keep(self, to_keep): """ Set the fraction of data points to keep during the ID estimate """ if to_keep <= 0 or to_keep > 1: raise ValueError("The fraction to keep must be between 0 (excluded) and 1.") self._to_keep = to_keep def fit_transform(self, X): """ Compute the intrinsic dimension estimation, based on the implementation of [1] and [2]. The steps described in [3] (p.3) are outlined in the code comments. [1] https://github.com/efacco/TWO-NN (C++ implementation by the authors of [3]) [2] https://github.com/ansuini/IntrinsicDimDeep (Python implementation by the authors of [4]) [3] Estimating the intrinsic dimension of datasets by a minimal neighborhood information <NAME>, <NAME>, <NAME>, and <NAME>, 2017 [4] Intrinsic dimension of data representations in deep neural networks <NAME>, <NAME>, <NAME>, and <NAME>, 2019 """ self._knn.fit(X) # 1. Compute the pairwise distances for each point in the dataset logger.info("Computing the pairwise distance between each point of the dataset") # x_dist = np.sort(squareform(pdist(X)), axis=1, kind="heapsort") x_dist = self._knn.kneighbors(X)[0] # 2. Get two shortest distances logger.info("Getting the two shortest distances") r1 = x_dist[:, 1] r2 = x_dist[:, 2] # This step was added in Ansuini et al. implementation # logger.info("Removing zero values and degeneracies") # zeros = np.where(r1 == 0)[0] # degeneracies = np.where(r1 == r2)[0] # good = np.setdiff1d(np.arange(x_dist.shape[0]), np.array(zeros)) # good = np.setdiff1d(good, np.array(degeneracies)) # logger.info(good.shape) # r1 = r1[good] # r2 = r2[good] # 3. For each point i compute mu_i logger.info("Computing mu_i for each point i") mu = np.sort(r2/r1, kind="heapsort") # 4. Compute the empirical cumulate Femp(mu) logger.info("Computing the empirical cumulate") n = r1.shape[0] Femp = np.arange(0, n, dtype=np.float64) / n # 5. Fit the points of the plane given by coordinates {(log(mu_i), -log(1 - Femp(mu_i)))|i=1, …, n} with a # straight line passing through the origin, using the analytical solution of the linear regression. 
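# (For a line through the origin, y = d * x, least squares gives the
# closed-form slope d = sum(x_i * y_i) / sum(x_i ** 2), which is exactly what
# np.dot(x, y) / np.dot(x, x) computes below.)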
# Note that we discard 10% of the points by default, as recommended in the TwoNN paper logger.info("Fitting the first {}% of the points with a linear regression".format(self._to_keep * 100)) n_to_keep = int(n * self._to_keep) x = np.log(mu)[:n_to_keep] y = -np.log(1 - Femp)[:n_to_keep] d = np.dot(x, y) / np.dot(x, x) return d class MLE: def __init__(self, k, seed, runs=5, anchor=0.9): self._anchor = anchor self._k = k self._seed = seed self._n_runs = runs self._knn = NearestNeighbors(n_neighbors=k+1) @property def anchor(self): return self._anchor @anchor.setter def anchor(self, anchor): """ Set the fraction of data points to keep during the ID estimate """ if anchor <= 0 or anchor > 1: raise ValueError("The anchor fraction must be between 0 (excluded) and 1.") self._anchor = anchor @property def k(self): return self._k @k.setter def k(self, k): """ Set the number of neighbours to use for the ID estimate """ if k <= 0: raise ValueError("The number of neighbours must be greater than 0.") self._k = k def fit_transform(self, X): anchor_samples = int(self.anchor * X.shape[0]) res = np.zeros((self._n_runs,)) data_idxs = np.arange(X.shape[0]) self._knn.fit(X) for i in range(self._n_runs): logger.info("Computing iteration {} of MLE with k={}".format(i, self._k)) np.random.shuffle(data_idxs) anchor_idxs = data_idxs[:anchor_samples] res[i] = self._compute_mle(X[anchor_idxs]) return res.mean() def _compute_mle(self, X): dist = self._knn.kneighbors(X)[0][:, 1:] if not np.all(dist > 0.): logger.info(np.argwhere(dist <= 0.)) logger.info(dist[np.argwhere(dist <= 0.)]) assert np.all(dist > 0.) d = np.log(dist[:, self._k - 1: self._k] / dist[:, 0:self._k - 1]) d = d.sum(axis=1) / (self.k - 2) return 1. / d.mean() class Hidalgo: """ Compute Hidalgo, an algorithm initially proposed in [1]. The implementation is from https://github.com/micheleallegra/Hidalgo/tree/master/python, the code released with [1].
[1] Data segmentation based on the local intrinsic dimension, Allegra et al., 2020 """ def __init__(self, metric='euclidean', k=2, zeta=0.8, q=3, iters=10000, replicas=10, burn_in=0.9): """ :param metric: The metric to use for KNN, if predefined, then a distance matrix will be given when calling fit :param k: The number of manifolds :param zeta: The probability to sample the neighbour of a point from the same manifold (in the paper's formula, this is xsi) :param q: number of closest neighbours from each point to keep :param iters: number of iterations of the Gibbs sampling :param replicas: number of times the sampling should be replicated :param burn_in: fraction of iterations to discard as burn-in before sampling """ self.metric = metric self.k = k self.zeta = zeta self.q = q self.iters = iters self.burn_in = burn_in self.replicas = replicas # Setting prior parameters of d to 1 self.a = np.ones(k) self.b = np.ones(k) # Setting prior parameter of p to 1 self.c = np.ones(k) # Setting prior parameter of zeta to 1 self.f = np.ones(k) # Save samples every 10 sampling steps and compute the total number of samples self.sampling_rate = 10 self.n_samples = np.floor((self.iters - np.ceil(self.burn_in * self.iters)) / self.sampling_rate).astype(int) # z will not be fixed self.fixed_z = 0 # Local interactions between z are used self.use_local_z_interaction = 1 # z will not be updated during the training self.update_z = 0 def _fit(self, X): assert isinstance(X, np.ndarray), "X should be a numpy array" assert len(np.shape(X)) == 2, "X should be a two-dimensional numpy array" n, d = np.shape(X) nns_mat = np.zeros((n, n)) logger.info("Getting the {} nearest neighbours from each point".format(self.q)) if self.metric == "predefined": distances = np.sort(X)[:, :self.q + 1] indices_in = np.argsort(X)[:, :self.q + 1] else: nns = NearestNeighbors(n_neighbors=self.q + 1, algorithm="ball_tree", metric=self.metric).fit(X) distances, indices_in = nns.kneighbors(X) for i in range(self.q): nns_mat[indices_in[:, 0], indices_in[:, i + 1]] = 1 nns_count = np.sum(nns_mat, axis=0) indices_out = np.where(nns_mat.T)[1] indices_track =
np.cumsum(nns_count)
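# A minimal usage sketch for the estimators above (synthetic data, so the
# exact estimates will vary):
#
# import numpy as np
# X = np.random.default_rng(0).normal(size=(1000, 10))
# print("TwoNN ID:", TwoNN().fit_transform(X))
# print("MLE ID:", MLE(k=10, seed=0).fit_transform(X))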
#!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2017-10-17 # @Filename: region.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) # # @Last modified by: <NAME> (<EMAIL>) # @Last modified time: 2019-03-13 11:32:39 import astropy.units as u from astropy.coordinates import SkyCoord import matplotlib.patches import matplotlib.path import matplotlib.transforms import numpy from copy import deepcopy from spherical_geometry import polygon as sp from lvmsurveysim.utils import plot as lvm_plot from lvmsurveysim.exceptions import LVMSurveyOpsError, LVMSurveyOpsWarning from . import _VALID_FRAMES __all__ = ['SkyRegion'] # if we want to inherit: #super(SubClass, self).__init__('x') class SkyRegion(object): """ This class represents a region on the sky. This class represents a region on the sky, parameterized either by one of a number of common shapes, or by a set of vertices of a polygon. Internally all shapes are held as `~spherical_geometry.polygon` object so that the edges of the polygons are great circle segments on a sphere. The class provides convenience methods to construct such polygons using the Target parameterization from the target yaml file. It also provides methods to compute intersections between the regions and whether a point is contained in the region (used later in tiling). Parameters ---------- typ : str String describing the shape, one of 'circle', 'ellipse', 'rectangle' 'polygon' or 'raw'. Depending on the value of this parameter, we expect to find further parameters in **kwargs. coords : tuple of float Center coordinates for 'circle', 'ellipse', 'rectangle' regions, or tuple of vertices for 'polygon' in degrees. For 'raw', we expect the `~spherical_geometry.SphericalPolygon` object. **kwargs : dict Must contain keyword 'frame' set to 'icrs' or 'galactic'. For 'rectangle' must contain 'width' and 'height' in degrees and 'pa' a position angle (N through E) also in degrees. For 'circle' must contain 'r' with radius in degrees. For 'ellipse' must contain 'a', 'b', 'pa' with semi-axes and position angle in degrees. For 'raw' we expect only the 'frame' to be passed as a keyword argument. """ def __init__(self, typ, coords, **kwargs): #print(typ, coords, kwargs) self.region_type = typ self.frame = kwargs['frame'] if typ == 'rectangle': self.center = coords width = kwargs['width'] height = kwargs['height'] x0 = - width / 2. x1 = + width / 2. y0 = - height / 2. y1 = + height / 2. 
x, y = self._rotate_coords([x0, x1, x1, x0, x0], [y0, y0, y1, y1, y0], kwargs['pa']) x, y = self._polygon_perimeter(x, y) y += self.center[1] x = x / numpy.cos(numpy.deg2rad(y)) + self.center[0] self.region = sp.SphericalPolygon.from_radec(x, y, degrees=True) elif typ == 'circle': self.center = coords r = kwargs['r'] k = int(numpy.max([numpy.floor(numpy.sqrt(r * 20)), 24])) x = numpy.array(list(reversed([r * numpy.cos(2.0*numpy.pi/k * i) for i in range(k+1)]))) y = numpy.array(list(reversed([r * numpy.sin(2.0*numpy.pi/k * i) for i in range(k+1)]))) y += self.center[1] x = x / numpy.cos(numpy.deg2rad(y)) + self.center[0] self.region = sp.SphericalPolygon.from_radec(x, y, center=self.center, degrees=True) # self.region = sp.SphericalPolygon.from_cone(coords[0], coords[1], kwargs['r']) # self.center = coords elif typ == 'ellipse': self.center = coords a, b = kwargs['a'], kwargs['b'] k = int(numpy.max([numpy.floor(numpy.sqrt(((a + b) / 2) * 20)), 24])) x = list(reversed([a * numpy.cos(2.0*numpy.pi/k * i) for i in range(k+1)])) y = list(reversed([b * numpy.sin(2.0*numpy.pi/k * i) for i in range(k+1)])) x, y = self._rotate_coords(x, y, kwargs['pa']) y += self.center[1] x = x / numpy.cos(numpy.deg2rad(y)) + self.center[0] self.region = sp.SphericalPolygon.from_radec(x, y, center=self.center, degrees=True) elif typ == 'polygon': self.region_type = 'polygon' x, y = self._rotate_vertices(numpy.array(coords), 0.0) x, y = self._polygon_perimeter(x, y) self.center = [numpy.average(x), numpy.average(y)] self.region = sp.SphericalPolygon.from_radec(x, y, center=self.center, degrees=True) elif typ == 'raw': assert isinstance(coords, sp.SphericalPolygon), 'Raw SkyRegion requires SphericalPolygon.' self.region_type = 'polygon' self.region = coords self.frame = kwargs['frame'] x, y = next(self.region.to_lonlat()) self.center = [numpy.average(x), numpy.average(y)] else: raise LVMSurveyOpsError('Unknown region type '+typ) def __repr__(self): return f'<SkyRegion(type={self.region_type}, center={self.center}, frame={self.frame})>' def vertices(self): """ Return a `~numpy.array` of dimension Nx2 with the N vertices of the SkyRegion. """ i = self.region.to_lonlat() return numpy.array(next(i)).T def bounds(self): """ Return a tuple of the bounds of the SkyRegion defined as the minimum and maximum value of the coordinates in each dimension. """ x, y = next(self.region.to_lonlat()) return numpy.min(x),
numpy.min(y), numpy.max(x), numpy.max(y)
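# A numpy-only sketch (not part of the class) of how SkyRegion builds the
# perimeter vertices of a 'circle' region above: sample k points on the
# circle, offset the declinations by the centre, then stretch the RA offsets
# by 1/cos(Dec). k is fixed here for brevity; the class scales it with r.
import numpy


def circle_vertices(ra0, dec0, r, k=24):
    """Vertices of a circle of radius r (deg) centred on (ra0, dec0)."""
    i = numpy.arange(k + 1)
    x = (r * numpy.cos(2.0 * numpy.pi / k * i))[::-1]
    y = (r * numpy.sin(2.0 * numpy.pi / k * i))[::-1]
    y = y + dec0                                   # shift Dec first
    x = x / numpy.cos(numpy.deg2rad(y)) + ra0      # then correct RA spacing
    return x, y


ra_verts, dec_verts = circle_vertices(150.0, 2.0, 1.5)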
""" Usage: python test_image_lm86_with_mtcnn.py --checkpoint_dir=checkpoints/test/model.ckpt-64001 """ #from __future__ import absolute_import #from __future__ import division #from __future__ import print_function import cv2, os, importlib, math import os.path as osp import numpy as np import tensorflow as tf from numpy.linalg import inv, norm, lstsq from numpy.linalg import matrix_rank as rank fixed_pts = np.array([73.3451,118.4487,96.0404,77.5112,114.8624,96.0070,95.7575,121.6586,148.0636,147.8569]).reshape((2,-1)).transpose() warp_size = [224, 192] # height, width pts_mean86 = np.array([45.559502,104.24780,47.094501,117.77705,49.368011,129.97537,52.305340,141.47940,56.249920,154.27869,\ 63.107460,165.01971,71.174850,172.36023,81.929237,178.68507,97.289093,182.29616,112.52919,177.89139,\ 123.33920,171.54056,131.66940,164.26958,138.23056,153.52193,141.85754,140.57895,144.45071,128.84717,\ 146.39426,116.36816,147.67754,102.47821,55.485870,84.340775,61.147385,78.009048,69.581528,75.797379,\ 78.612648,76.860222,86.064697,79.822960,62.489975,82.971130,69.879005,82.226051,77.701530,82.390945,\ 85.335213,84.248680,105.48699,79.453552,112.96900,76.432724,122.02381,75.246162,130.61064,77.285698,\ 136.46480,83.536705,106.27076,83.917999,113.94787,82.011887,121.85647,81.779221,129.34711,82.364937,\ 63.320316,96.792084,67.515862,94.584686,77.845810,94.563499,81.965393,97.318008,77.402710,98.552208,\ 67.509659,98.513344,72.628456,93.677307,72.395409,99.211624,110.02992,97.172417,114.07248,94.319572,\ 124.35910,94.110191,128.54343,96.266449,124.43443,98.040421,114.60693,98.309441,119.23931,93.295609,\ 119.60595,98.848557,95.895660,93.517433,95.888680,102.36029,95.881584,111.20296,95.874641,120.04578,\ 87.517784,97.529457,104.33669,97.407219,84.132019,116.47855,107.81488,116.41264,80.940468,124.97491,\ 111.12064,124.88945,85.455589,127.70387,90.463188,128.69844,95.953407,129.95752,101.45199,128.67410,\ 106.51112,127.66216,78.027786,147.66968,91.463295,140.84270,96.066689,141.89987,100.57447,140.78816,\ 114.46491,147.44310,96.189842,157.38145,84.125710,143.56898,108.07687,143.42168,109.86893,152.31499,\ 82.586426,152.45120,80.742477,147.74809,111.71300,147.55542,87.001198,146.80209,96.081726,146.87469,\ 105.23645,146.70581,86.978920,148.79839,96.164139,149.49869,105.38802,148.72549,88.427788,156.01730,\ 103.95959,155.95354]) pts_mean86 = pts_mean86.reshape(86,2) def tformfwd(trans, uv): uv = np.hstack(( uv, np.ones((uv.shape[0], 1)) )) xy = np.dot(uv, trans) xy = xy[:, 0:-1] return xy def get_similarity_transform(src_pts, dst_pts, reflective=True): if reflective: trans, trans_inv = findSimilarity(src_pts, dst_pts) else: trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) return trans, trans_inv def findSimilarity(uv, xy, options=None): options = {'K': 2} # uv = np.array(uv) # xy = np.array(xy) # Solve for trans1 trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options) # Solve for trans2 # manually reflect the xy data across the Y-axis xyR = xy.copy() xyR[:, 0] = -1 * xyR[:, 0] trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options) # manually reflect the tform to undo the reflection done on xyR TreflectY = np.array([ [-1, 0, 0], [0, 1, 0], [0, 0, 1] ]) trans2 = np.dot(trans2r, TreflectY) # Figure out if trans1 or trans2 is better xy1 = tformfwd(trans1, uv) norm1 = norm(xy1 - xy) xy2 = tformfwd(trans2, uv) norm2 = norm(xy2 - xy) if norm1 <= norm2: return trans1, trans1_inv else: trans2_inv = inv(trans2) return trans2, trans2_inv def findNonreflectiveSimilarity(uv, xy, 
options=None): options = {'K': 2} K = options['K'] M = xy.shape[0] x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector # print '--->x, y:\n', x, y tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) X = np.vstack((tmp1, tmp2)) # print '--->X.shape: ', X.shape # print 'X:\n', X u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector U = np.vstack((u, v)) # print '--->U.shape: ', U.shape # print 'U:\n', U # We know that X * r = U if rank(X) >= 2 * K: r, _, _, _ = lstsq(X, U) r = np.squeeze(r) else: raise Exception('cp2tform:twoUniquePointsReq') # print '--->r:\n', r sc = r[0] ss = r[1] tx = r[2] ty = r[3] Tinv = np.array([ [sc, -ss, 0], [ss, sc, 0], [tx, ty, 1] ]) # print '--->Tinv:\n', Tinv T = inv(Tinv) # print '--->T:\n', T T[:, 2] = np.array([0, 0, 1]) return T, Tinv def detect_lmk86(batch_imgs, batch_bboxes, batch_points, pb_path): batch_size = 1 with tf.Graph().as_default(): graph_def = tf.GraphDef() graph_file= pb_path with open(graph_file, 'rb') as f: print('hello') graph_def.ParseFromString(f.read()) tf.import_graph_def(graph_def, name="") with tf.Session() as sess: tf.global_variables_initializer().run() image = sess.graph.get_tensor_by_name('lmk86pt_input:0') predict_lanmark = sess.graph.get_tensor_by_name('lmk86pt_output:0') return_lms = [] count = 0 for img_index in range(len(batch_bboxes)): img = batch_imgs[img_index] # rgb -> bgr img = img[:,:,::-1] # concat warped faces batch_faces = [] trans_invs = [] for face_index in range(len(batch_bboxes[img_index])): # similarity transform mtcnn_landmark = np.transpose(batch_points[img_index][face_index].reshape(2,5)) trans, trans_inv = get_similarity_transform(mtcnn_landmark, fixed_pts) warp_img = cv2.warpAffine(img, trans[:, 0:2].T, (int(warp_size[1]), int(warp_size[0]))) batch_faces.append(warp_img) trans_invs.append(trans_inv) if len(batch_faces) == 0: return_lms.append(None) continue batch_faces = np.stack(batch_faces, axis=0) # batch mode out_predict_lanmarks = [] for i in range(int(math.ceil(len(batch_faces)/float(batch_size)))): now_batch_faces = batch_faces[i*batch_size:(i+1)*batch_size] out_predict_lanmark = sess.run(predict_lanmark, {image: now_batch_faces}) out_predict_lanmarks.append(out_predict_lanmark) out_predict_lanmarks = np.concatenate(out_predict_lanmarks, axis=0) out_predict_lanmarks += pts_mean86 # warp back batch_warp_back_lm = [] for face_index in range(len(batch_bboxes[img_index])): warp_back_lm = tformfwd(trans_invs[face_index], out_predict_lanmarks[face_index]) batch_warp_back_lm.append(
np.reshape(warp_back_lm, [-1]))
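# Example usage of the transform helpers defined above. The landmark values
# here are made up (ordering: left eye, right eye, nose, mouth corners, as in
# fixed_pts): estimate the similarity transform mapping detected 5-point
# landmarks onto the canonical `fixed_pts`, then verify the round trip.
src_demo = np.array([[70.0, 110.0], [100.0, 108.0], [85.0, 130.0],
                     [72.0, 150.0], [98.0, 149.0]])
trans_demo, trans_inv_demo = get_similarity_transform(src_demo, fixed_pts)

warped = tformfwd(trans_demo, src_demo)        # approx. fixed_pts (least squares)
restored = tformfwd(trans_inv_demo, warped)    # approx. src_demo (exact inverse)
assert np.allclose(restored, src_demo, atol=1e-4)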
import pickle
from flask import Flask, request, render_template
import numpy as np
import json

app = Flask(__name__)

with open("project/predict_housing_price.pkl", "rb") as f:
    model = pickle.load(f)


def process_input(request_data: str) -> np.ndarray:
    """Takes in the input data and converts it to an array that the model can understand"""
    parsed_body = np.asarray(json.loads(request_data)["inputs"])
    assert len(parsed_body.shape) == 2, "Input must be a 2-D array"
    return parsed_body


def error_check(exc: Exception) -> tuple:
    """Maps a raised exception to a JSON error message and an HTTP status code"""
    if isinstance(exc, (KeyError, json.JSONDecodeError, AssertionError, ValueError)):
        return json.dumps({"error": "Check input"}), 400
    return json.dumps({"error": "Prediction Failed"}), 500


@app.route("/")
def home():
    """Renders the main page from the index template"""
    return render_template('index.html')


@app.route('/predict', methods=["POST"])
def predict():
    """An interface for the user to plug in the inputs and receive the predicted price"""
    try:
        features = [float(x) for x in request.form.values()]
        final_features = [np.array(features)]
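# A client-side sketch for the /predict endpoint above (run the app first).
# The form field names below are hypothetical -- predict() only requires that
# every submitted form value parses as a float.
import requests

form = {"rooms": "3", "area": "120.5", "age": "15"}
resp = requests.post("http://127.0.0.1:5000/predict", data=form)
print(resp.status_code, resp.text)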
""" ================================================================= Plotting cyrstal orbital overlap population obtained from lobster ================================================================= This example shows how to plot overlap population data See http://www.cohp.de/ for details """ import os import numpy as np import matplotlib.pyplot as plt from pdos_overlap.overlap_population import get_example_data from pdos_overlap.overlap_population import OVERLAP_POPULATION from pdos_overlap.plotting_tools import set_figure_settings ####################################################################################### # Load COOPCAR file # ----------------- # # First we will, get the example data, load a COOPCAR file and use it to # instantiate an OVERLAP_POPULATION object set_figure_settings('paper') example_path = get_example_data() COOPCAR = os.path.join(example_path, 'C2H4/COOPCAR.lobster') POP = OVERLAP_POPULATION(COOPCAR) ####################################################################################### # Identify bonding interactions and check for spin # ------------------------------------------------ # print(POP.interactions) print(POP.is_spin) ####################################################################################### # Obtain projected overlap # ------------------------ # # We projected orbital overlap for the C-C bond and C-H bonds in C2H4 # We group the CH bonds and ensure to sum for spins as all electrons are paired CC_overlap = POP.get_pcoop(interactions=[0], sum_pcoop=False, sum_spin=True) CH_overlap = POP.get_pcoop(interactions=[1,2,3,4], sum_pcoop=True, sum_spin=True) ####################################################################################### # Plot the bonding populaiton with respect to the CC and CH bonds # --------------------------------------------------------------- # # A positive value on the x-axis indicates are greater proportion of states in # in the bond than outside of the bond plt.figure(figsize=(3,5)) plt.plot(CC_overlap, POP.get_energies(), zorder=3) plt.plot(CH_overlap, POP.get_energies(), zorder=2) plt.plot([np.min([CC_overlap, CH_overlap]),
np.max([CC_overlap, CH_overlap])
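# A short numerical follow-up sketch. Assumptions: POP.get_energies() returns
# energies relative to the Fermi level, and the pcoop arrays are aligned with
# that energy grid. Integrating the COOP over occupied states then gives an
# ICOOP-like measure of net bonding for each interaction.
energies = POP.get_energies()
occupied = energies <= 0
cc = np.squeeze(CC_overlap)
ch = np.squeeze(CH_overlap)
CC_icoop = np.trapz(cc[occupied], energies[occupied])
CH_icoop = np.trapz(ch[occupied], energies[occupied])
print(CC_icoop, CH_icoop)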
import os import numpy as np import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt import torch from torch.utils.data import DataLoader from tqdm import tqdm import argparse import cv2 import config from utils import Mesh from models import CMR, NMRRenderer from models.smpl_from_lib import SMPL from utils.pose_utils import compute_similarity_transform_batch, scale_and_translation_transform_batch from utils.cam_utils import orthographic_project_torch, undo_keypoint_normalisation, \ get_intrinsics_matrix, batch_convert_weak_perspective_to_camera_translation from utils.label_conversions import convert_multiclass_to_binary_labels_torch from datasets.sports_videos_eval_dataset import SportsVideosEvalDataset def evaluate_single_in_multitasknet_sports_videos(model, eval_dataset, metrics, device, save_path, num_workers=4, pin_memory=True, vis_every_n_batches=1, output_img_wh=256): eval_dataloader = DataLoader(eval_dataset, batch_size=1, shuffle=False, drop_last=True, num_workers=num_workers, pin_memory=pin_memory) smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1) smpl.to(device) smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male') smpl_male.to(device) smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female') smpl_female.to(device) if 'pve' in metrics: pve_smpl_sum = 0.0 pve_graph_sum = 0.0 pve_smpl_per_frame = [] pve_graph_per_frame = [] if 'pve_scale_corrected' in metrics: pve_scale_corrected_smpl_sum = 0.0 pve_scale_corrected_graph_sum = 0.0 pve_scale_corrected_smpl_per_frame = [] pve_scale_corrected_graph_per_frame = [] if 'pve_pa' in metrics: pve_pa_smpl_sum = 0.0 pve_pa_graph_sum = 0.0 pve_pa_smpl_per_frame = [] pve_pa_graph_per_frame = [] if 'pve-t' in metrics: pvet_sum = 0.0 pvet_per_frame = [] if 'pve-t_scale_corrected' in metrics: pvet_scale_corrected_sum = 0.0 pvet_scale_corrected_per_frame = [] if 'silhouette_iou' in metrics: # Set-up NMR renderer to render silhouettes from predicted vertex meshes. 
# Assuming camera rotation is identity (since it is dealt with by global_orients) cam_K = get_intrinsics_matrix(output_img_wh, output_img_wh, config.FOCAL_LENGTH) cam_K = torch.from_numpy(cam_K.astype(np.float32)).to(device) cam_R = torch.eye(3).to(device) cam_K = cam_K[None, :, :] cam_R = cam_R[None, :, :] nmr_parts_renderer = NMRRenderer(1, cam_K, cam_R, output_img_wh, rend_parts_seg=True).to(device) num_true_positives_smpl = 0.0 num_false_positives_smpl = 0.0 num_true_negatives_smpl = 0.0 num_false_negatives_smpl = 0.0 num_true_positives_graph = 0.0 num_false_positives_graph = 0.0 num_true_negatives_graph = 0.0 num_false_negatives_graph = 0.0 silhouette_iou_smpl_per_frame = [] silhouette_iou_graph_per_frame = [] if 'j2d_l2e' in metrics: j2d_l2e_sum = 0.0 j2d_l2e_per_frame = [] num_samples = 0 num_vertices = 6890 num_joints2d = 17 frame_path_per_frame = [] pose_per_frame = [] shape_per_frame = [] cam_per_frame = [] model.eval() for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)): # ------------------------------- TARGETS and INPUTS ------------------------------- input = samples_batch['input'] input = input.to(device) target_shape = samples_batch['shape'] target_shape = target_shape.to(device) target_vertices = samples_batch['vertices'] target_silhouette = samples_batch['silhouette'] target_joints2d_coco = samples_batch['keypoints'] target_gender = samples_batch['gender'][0] if target_gender == 'm': target_reposed_smpl_output = smpl_male(betas=target_shape) elif target_gender == 'f': target_reposed_smpl_output = smpl_female(betas=target_shape) target_reposed_vertices = target_reposed_smpl_output.vertices # ------------------------------- PREDICTIONS ------------------------------- pred_vertices, pred_vertices_smpl, pred_camera, pred_rotmat, pred_betas = model(input) pred_vertices_projected2d = orthographic_project_torch(pred_vertices, pred_camera) pred_vertices_projected2d = undo_keypoint_normalisation(pred_vertices_projected2d, input.shape[-1]) pred_vertices_smpl_projected2d = orthographic_project_torch(pred_vertices_smpl, pred_camera) pred_vertices_smpl_projected2d = undo_keypoint_normalisation(pred_vertices_smpl_projected2d, input.shape[-1]) pred_reposed_smpl_output = smpl(betas=pred_betas) pred_reposed_vertices = pred_reposed_smpl_output.vertices if 'j2d_l2e' in metrics: pred_smpl_output = smpl(betas=pred_betas, body_pose=pred_rotmat[:, 1:], global_orient=pred_rotmat[:, 0].unsqueeze(1), pose2rot=False) pred_joints_all = pred_smpl_output.joints pred_joints_coco = pred_joints_all[:, config.ALL_JOINTS_TO_COCO_MAP, :] pred_joints2d_coco = orthographic_project_torch(pred_joints_coco, pred_camera) pred_joints2d_coco = undo_keypoint_normalisation(pred_joints2d_coco, output_img_wh) pred_camera = pred_camera.cpu().detach().numpy() if 'silhouette_iou' in metrics: pred_cam_ts = batch_convert_weak_perspective_to_camera_translation(pred_camera, config.FOCAL_LENGTH, output_img_wh) pred_cam_ts = torch.from_numpy(pred_cam_ts).float().to(device) part_seg = nmr_parts_renderer(pred_vertices, pred_cam_ts.unsqueeze(0)) pred_silhouette = convert_multiclass_to_binary_labels_torch(part_seg) pred_silhouette = pred_silhouette.cpu().detach().numpy() part_seg_smpl = nmr_parts_renderer(pred_vertices_smpl, pred_cam_ts.unsqueeze(0)) pred_silhouette_smpl = convert_multiclass_to_binary_labels_torch(part_seg_smpl) pred_silhouette_smpl = pred_silhouette_smpl.cpu().detach().numpy() # Numpy-fying target_vertices = target_vertices.cpu().detach().numpy() target_reposed_vertices = 
target_reposed_vertices.cpu().detach().numpy() target_silhouette = target_silhouette.numpy() target_joints2d_coco = target_joints2d_coco.numpy() pred_vertices = pred_vertices.cpu().detach().numpy() pred_vertices_smpl = pred_vertices_smpl.cpu().detach().numpy() pred_vertices_projected2d = pred_vertices_projected2d.cpu().detach().numpy() pred_vertices_smpl_projected2d = pred_vertices_smpl_projected2d.cpu().detach().numpy() pred_reposed_vertices = pred_reposed_vertices.cpu().detach().numpy() pred_rotmat = pred_rotmat.cpu().detach().numpy() pred_betas = pred_betas.cpu().detach().numpy() pred_joints2d_coco = pred_joints2d_coco.cpu().detach().numpy() # ------------------------------- METRICS ------------------------------- if 'pve' in metrics: pve_smpl_batch = np.linalg.norm(pred_vertices_smpl - target_vertices, axis=-1) # (1, 6890) pve_graph_batch = np.linalg.norm(pred_vertices - target_vertices, axis=-1) pve_smpl_sum += np.sum(pve_smpl_batch) # scalar pve_graph_sum += np.sum(pve_graph_batch) pve_smpl_per_frame.append(np.mean(pve_smpl_batch, axis=-1)) pve_graph_per_frame.append(np.mean(pve_graph_batch, axis=-1)) # Scale and translation correction if 'pve_scale_corrected' in metrics: pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl, target_vertices) pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices, target_vertices) pve_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_sc - target_vertices, axis=-1) # (1, 6890) pve_sc_graph_batch = np.linalg.norm(pred_vertices_sc - target_vertices, axis=-1) # (1, 6890) pve_scale_corrected_smpl_sum += np.sum(pve_sc_smpl_batch) # scalar pve_scale_corrected_graph_sum += np.sum(pve_sc_graph_batch) # scalar pve_scale_corrected_smpl_per_frame.append(np.mean(pve_sc_smpl_batch, axis=-1)) pve_scale_corrected_graph_per_frame.append(np.mean(pve_sc_graph_batch, axis=-1)) # Procrustes analysis if 'pve_pa' in metrics: pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices) pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices) pve_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_pa - target_vertices, axis=-1) # (1, 6890) pve_pa_graph_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (1, 6890) pve_pa_smpl_sum += np.sum(pve_pa_smpl_batch) # scalar pve_pa_graph_sum += np.sum(pve_pa_graph_batch) # scalar pve_pa_smpl_per_frame.append(
np.mean(pve_pa_smpl_batch, axis=-1)) pve_pa_graph_per_frame.append(np.mean(pve_pa_graph_batch, axis=-1))
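# Standalone sketch of the per-vertex error (PVE) metric used above: the mean
# Euclidean distance between predicted and ground-truth mesh vertices. The
# random arrays are placeholders for (batch, 6890, 3) SMPL vertex tensors.
import numpy as np


def pve(pred_vertices, target_vertices):
    """pred/target: (batch, num_vertices, 3) arrays; returns per-sample PVE."""
    return np.linalg.norm(pred_vertices - target_vertices, axis=-1).mean(axis=-1)


pred = np.random.rand(1, 6890, 3)
target = np.random.rand(1, 6890, 3)
print(pve(pred, target))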
import os import gym import numpy as np import sys import pybullet from metagym.quadrupedal.robots import robot_config from metagym import quadrupedal from metagym.quadrupedal.robots import action_filter from metagym.quadrupedal.envs.utilities.ETG_model import ETG_layer,ETG_model from copy import copy Param_Dict = {'torso':1.0,'up':0.3,'feet':0.2,'tau':0.1,'done':1,'velx':0,'badfoot':0.1,'footcontact':0.1} Random_Param_Dict = {'random_dynamics':0,'random_force':0} def EnvWrapper(env,param,sensor_mode,normal=0,ETG_T=0.5,enable_action_filter=False, reward_p=1,ETG=1,ETG_path="",ETG_T2=0.5,random_param=None, ETG_H=20,act_mode="traj",vel_d=0.6,vel_mode="max", task_mode="normal",step_y=0.05): env = ETGWrapper(env=env,ETG=ETG,ETG_T=ETG_T,ETG_path=ETG_path, ETG_T2=ETG_T2,ETG_H=ETG_H,act_mode=act_mode, task_mode=task_mode,step_y=step_y) env = ActionFilterWrapper(env=env,enable_action_filter=enable_action_filter) env = RandomWrapper(env=env,random_param=random_param) env = ObservationWrapper(env=env,ETG=ETG,sensor_mode=sensor_mode,normal=normal,ETG_H = ETG_H) env = RewardShaping(env=env,param=param,reward_p=reward_p,vel_d=vel_d,vel_mode=vel_mode) return env class ActionFilterWrapper(gym.Wrapper): def __init__(self,env,enable_action_filter): gym.Wrapper.__init__(self, env) self.robot = self.env.robot self.pybullet_client = self.env.pybullet_client self.observation_space = self.env.observation_space self.action_space = self.env.action_space self.enable_action_filter = enable_action_filter and self.env.ETG.endswith("sac") if self.enable_action_filter: self._action_filter = self._BuildActionFilter() def reset(self,**kwargs): obs_all,info = self.env.reset(**kwargs) self._step_counter = 0 if self.enable_action_filter: self._ResetActionFilter() return obs_all,info def step(self,action,**kwargs): if self.enable_action_filter: action = self._FilterAction(action) obs_all, rew, done, info = self.env.step(action) self._step_counter += 1 return obs_all, rew, done, info def _BuildActionFilter(self): sampling_rate = 1 / self.env.env_time_step num_joints = 12 a_filter = action_filter.ActionFilterButter(sampling_rate=sampling_rate, num_joints=num_joints) return a_filter def _ResetActionFilter(self): self._action_filter.reset() def _FilterAction(self, action): # initialize the filter history, since resetting the filter will fill # the history with zeros and this can cause sudden movements at the start # of each episode if self._step_counter == 0: default_action = np.array([0,0,0]*4) self._action_filter.init_history(default_action) # for j in range(10): # self._action_filter.filter(default_action) filtered_action = self._action_filter.filter(action) # print(filtered_action) return filtered_action class ObservationWrapper(gym.Wrapper): def __init__(self, env,ETG,sensor_mode,normal,ETG_H): gym.Wrapper.__init__(self, env) # print("env_time:",self.env.env_time_step) self.robot = self.env.robot self.pybullet_client = self.env.pybullet_client self.observation_space = self.env.observation_space self.action_space = self.env.action_space self.sensor_mode = sensor_mode self.normal = normal self.ETG_H = ETG_H self.ETG = ETG self.ETG_mean = np.array([2.1505982e-02, 3.6674485e-02, -6.0444288e-02, 2.4625482e-02, 1.5869144e-02, -3.2513142e-02, 2.1506395e-02, 3.1869926e-02, -6.0140789e-02, 2.4625063e-02, 1.1628972e-02, -3.2163858e-02]) self.ETG_std = np.array([4.5967497e-02,2.0340437e-01, 3.7410179e-01, 4.6187632e-02, 1.9441207e-01, 3.9488649e-01, 4.5966785e-02 ,2.0323379e-01, 3.7382501e-01, 4.6188373e-02 ,1.9457331e-01, 
3.9302582e-01]) if self.ETG: if "ETG" in self.sensor_mode.keys() and sensor_mode["ETG"] : sensor_shape = self.observation_space.high.shape[0] obs_h = np.array([1]*(sensor_shape+12)) obs_l = np.array([0]*(sensor_shape+12)) self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32) if "ETG_obs" in self.sensor_mode.keys() and sensor_mode["ETG_obs"] : sensor_shape = self.observation_space.high.shape[0] obs_h = np.array([1]*(sensor_shape+self.ETG_H)) obs_l = np.array([0]*(sensor_shape+self.ETG_H)) self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32) if "force_vec" in self.sensor_mode.keys() and sensor_mode["force_vec"]: sensor_shape = self.observation_space.high.shape[0] obs_h = np.array([1]*(sensor_shape+6)) obs_l = np.array([0]*(sensor_shape+6)) self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32) if "dynamic_vec" in self.sensor_mode.keys() and sensor_mode["dynamic_vec"]: sensor_shape = self.observation_space.high.shape[0] obs_h = np.array([1]*(sensor_shape+3)) obs_l = np.array([0]*(sensor_shape+3)) self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32) if "yaw" in self.sensor_mode.keys() and sensor_mode["yaw"]: sensor_shape = self.observation_space.high.shape[0] obs_h = np.array([1]*(sensor_shape+2)) obs_l = np.array([0]*(sensor_shape+2)) self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32) if "RNN" in self.sensor_mode.keys() and self.sensor_mode["RNN"]["time_steps"]>0: self.time_steps = sensor_mode["RNN"]["time_steps"] self.time_interval = sensor_mode["RNN"]["time_interval"] self.sensor_shape = self.observation_space.high.shape[0] self.obs_history = np.zeros((self.time_steps*self.time_interval,self.sensor_shape)) if sensor_mode["RNN"]["mode"] == "stack": obs_h = np.array([1]*(self.sensor_shape*(self.time_steps+1))) obs_l = np.array([0]*(self.sensor_shape*(self.time_steps+1))) self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32) def reset(self,**kwargs): obs,info = self.env.reset(**kwargs) self.dynamic_info = info["dynamics"] if self.ETG: if "ETG" in self.sensor_mode.keys() and self.sensor_mode["ETG"] : ETG_out = info["ETG_act"] if self.normal: ETG_out = (ETG_out-self.ETG_mean)/self.ETG_std obs = np.concatenate((obs,ETG_out),axis = 0) if "ETG_obs" in self.sensor_mode.keys() and self.sensor_mode["ETG_obs"] : ETG_obs = info["ETG_obs"] obs = np.concatenate((obs,ETG_obs),axis = 0) if "force_vec" in self.sensor_mode.keys() and self.sensor_mode["force_vec"]: force_vec = info["force_vec"] obs = np.concatenate((obs,force_vec),axis = 0) if "dynamic_vec" in self.sensor_mode.keys() and self.sensor_mode["dynamic_vec"]: dynamic_vec = self.dynamic_info obs = np.concatenate((obs,dynamic_vec),axis = 0) if "yaw" in self.sensor_mode.keys() and self.sensor_mode["yaw"]: if "d_yaw" in kwargs.keys(): d_yaw = kwargs["d_yaw"] else: d_yaw = 0 yaw_now = info["pose"][-1] yaw_info = np.array([np.cos(d_yaw-yaw_now),np.sin(d_yaw-yaw_now)]) obs = np.concatenate((obs,yaw_info),axis = 0) if "RNN" in self.sensor_mode.keys() and self.sensor_mode["RNN"]["time_steps"]>0: self.obs_history = np.zeros((self.time_steps*self.time_interval,self.sensor_shape)) obs_list = [] for t in range(self.time_steps): obs_list.append(copy(self.obs_history[t*self.time_interval])) obs_list.append(copy(obs)) self.obs_history[-1] = copy(obs) if self.sensor_mode["RNN"]["mode"]=="GRU": obs = np.stack(obs_list,axis=0) elif self.sensor_mode["RNN"]["mode"]=="stack": obs = np.array(obs_list).reshape(-1) return obs,info def step(self,action,**kwargs): obs, rew, 
done, info = self.env.step(action, **kwargs) if self.ETG: if "ETG" in self.sensor_mode.keys() and self.sensor_mode["ETG"] : ETG_out = info["ETG_act"] if self.normal: ETG_out = (ETG_out-self.ETG_mean)/self.ETG_std obs = np.concatenate((obs,ETG_out),axis = 0) if "ETG_obs" in self.sensor_mode.keys() and self.sensor_mode["ETG_obs"] : ETG_obs = info["ETG_obs"] obs = np.concatenate((obs,ETG_obs),axis = 0) if "force_vec" in self.sensor_mode.keys() and self.sensor_mode["force_vec"]: force_vec = info["force_vec"] obs = np.concatenate((obs,force_vec),axis = 0) if "dynamic_vec" in self.sensor_mode.keys() and self.sensor_mode["dynamic_vec"]: dynamic_vec = self.dynamic_info obs = np.concatenate((obs,dynamic_vec),axis = 0) if "yaw" in self.sensor_mode.keys() and self.sensor_mode["yaw"]: if "d_yaw" in kwargs.keys(): d_yaw = kwargs["d_yaw"] else: d_yaw = 0 yaw_now = info["pose"][-1] yaw_info = np.array([np.cos(d_yaw-yaw_now),np.sin(d_yaw-yaw_now)]) obs = np.concatenate((obs,yaw_info),axis = 0) if "RNN" in self.sensor_mode.keys() and self.sensor_mode["RNN"]["time_steps"]>0: obs_list = [] for t in range(self.time_steps): obs_list.append(copy(self.obs_history[t*self.time_interval])) obs_list.append(copy(obs)) self.obs_history[:-1] = copy(self.obs_history[1:]) self.obs_history[-1] = copy(obs) if self.sensor_mode["RNN"]["mode"]=="GRU": obs = np.stack(obs_list,axis=0) elif self.sensor_mode["RNN"]["mode"]=="stack": obs = np.array(obs_list).reshape(-1) return obs,rew,done,info class ETGWrapper(gym.Wrapper): def __init__(self, env,ETG,ETG_T,ETG_path,ETG_T2,ETG_H=20,act_mode="traj",task_mode="normal",step_y=0.05): gym.Wrapper.__init__(self, env) self.robot = self.env.robot self.pybullet_client = self.env.pybullet_client self.observation_space = self.env.observation_space self.action_space = self.env.action_space self.ETG_T2 = ETG_T2 self.ETG_T = ETG_T self.ETG_H = ETG_H self.act_mode = act_mode self.step_y = step_y self.task_mode = task_mode self.ETG = ETG phase = np.array([-np.pi/2,0]) if self.ETG: self.ETG_agent = ETG_layer(self.ETG_T,self.env.env_time_step,self.ETG_H,0.04,phase,0.2,self.ETG_T2) self.ETG_weight = 1 if len(ETG_path)>1 and os.path.exists(ETG_path): info = np.load(ETG_path) self.ETG_w = info["w"] self.ETG_b = info["b"] else: self.ETG_w = np.zeros((3,ETG_H)) self.ETG_b = np.zeros(3) self.ETG_model = ETG_model(task_mode=self.task_mode,act_mode=act_mode,step_y=self.step_y) self.last_ETG_act = np.zeros(12) self.last_ETG_obs = np.zeros(self.ETG_H) def reset(self,**kwargs): kwargs["info"] = True obs,info = self.env.reset(**kwargs) if self.ETG: if "ETG_w" in kwargs.keys() and kwargs["ETG_w"] is not None: self.ETG_w = kwargs["ETG_w"] if "ETG_b" in kwargs.keys() and kwargs["ETG_b"] is not None: self.ETG_b = kwargs["ETG_b"] self.ETG_agent.reset() state = self.ETG_agent.update2(t=self.env.get_time_since_reset()) act_ref = self.ETG_model.forward(self.ETG_w,self.ETG_b,state) act_ref = self.ETG_model.act_clip(act_ref,self.robot) self.last_ETG_act = act_ref*self.ETG_weight info["ETG_obs"] = state[0] info["ETG_act"] = self.last_ETG_act return obs,info def step(self,action,**kwargs): if self.ETG: action = np.asarray(action).reshape(-1)+self.last_ETG_act state = self.ETG_agent.update2(t=self.env.get_time_since_reset()) act_ref = self.ETG_model.forward(self.ETG_w,self.ETG_b,state) action_before = act_ref act_ref = self.ETG_model.act_clip(act_ref,self.robot) self.last_ETG_act = act_ref*self.ETG_weight obs, rew, done, info = self.env.step(action) info["ETG_obs"] = state[0] info["ETG_act"] = self.last_ETG_act else: obs, 
rew, done, info = self.env.step(action) return obs,rew,done,info class RewardShaping(gym.Wrapper): def __init__(self, env,param,reward_p=1,vel_d=0.6,vel_mode="max"): gym.Wrapper.__init__(self, env) self.param = param self.reward_p = reward_p self.last_base10 = np.zeros((10,3)) self.robot = self.env.robot self.pybullet_client = self.env.pybullet_client self.observation_space = self.env.observation_space self.action_space = self.env.action_space self.vel_d = vel_d self.steps = 0 self.vel_mode = vel_mode self.yaw_init = 0.0 def reset(self,**kwargs): self.steps = 0 obs,info = self.env.reset(**kwargs) self.yaw_init = info["yaw_init"] obs, rew, done, infos = self.env.step(np.zeros(self.action_space.high.shape[0])) self.last_basepose = info["base"] self.last_footposition = self.get_foot_world(info) base_pose = info["base"] self.last_base10 = np.tile(base_pose,(10,1)) info["foot_position_world"] = copy(self.last_footposition) info["scene"] = "plane" if "d_yaw" in kwargs.keys(): info['d_yaw'] = kwargs["d_yaw"] else: info['d_yaw'] = 0 if self.render: self.line_id = self.draw_direction(info) return obs,info def step(self,action,**kwargs): self.steps+=1 obs, rew, done, info = self.env.step(action, **kwargs) self.env_vec = np.array([0,0,0,0,0,0,0]) posex = info["base"][0] for env_v in info["env_info"]: if posex+0.2 >= env_v[0] and posex+0.2 <= env_v[1]: self.env_vec = env_v[2] break if self.env_vec[0]: info["scene"] = "upslope" elif self.env_vec[1]: info["scene"] = "downslope" elif self.env_vec[2]: info["scene"] = "upstair" elif self.env_vec[3]: info["scene"] = "downstair" else: info["scene"] = "plane" v = (np.array(info["base"])-np.array(self.last_basepose))/0.026 if "d_yaw" in kwargs.keys(): info['d_yaw'] = kwargs["d_yaw"] else: info['d_yaw'] = 0 donef = kwargs["donef"] if "donef" in kwargs.keys() else False info = self.reward_shaping(obs, rew, done, info,action,donef) info["vel"] = v rewards = 0 done = self.terminate(info) if done: info["done"] = -1 else: info["done"] = 0 for key in Param_Dict.keys(): if key in info.keys(): # print(key) rewards+= info[key] info["velx"] = rew self.last_basepose = copy(info["base"]) self.last_base10[1:,:] = self.last_base10[:9,:] self.last_base10[0,:] = np.array(info['base']).reshape(1,3) self.last_footposition = self.get_foot_world(info) info["foot_position_world"] = copy(self.last_footposition) if self.render: self.pybullet_client.removeUserDebugItem(self.line_id) self.line_id = self.draw_direction(info) return (obs, self.reward_p*rewards, done, info) def reward_shaping(self,obs, rew, done, info,action,donef,last_basepose=None,last_footposition=None): torso = self.re_torso(info,last_basepose=last_basepose) info['torso'] = self.param['torso']*torso if last_basepose is None: v = (np.array(info["base"])-np.array(self.last_basepose))/0.026 else: v = (np.array(info["base"])-np.array(last_basepose))/0.026 k = 1-self.c_prec(min(v[0],self.vel_d),self.vel_d,0.5) info['up'] = (self.param['up'])*self.re_up(info)*k info['feet'] = self.param['feet']*self.re_feet(info,last_footposition=last_footposition) info['tau'] = -self.param['tau']*info['energy']*k info['badfoot'] = -self.param['badfoot']*self.robot.GetBadFootContacts() lose_contact_num = np.sum(1.0-np.array(info["real_contact"])) info['footcontact'] = -self.param['footcontact']*max(lose_contact_num-2,0) return info def draw_direction(self,info): pose = info['base'] if self.render: id = self.pybullet_client.addUserDebugLine(lineFromXYZ=[pose[0],pose[1],0.6], 
lineToXYZ=[pose[0]+np.cos(info['d_yaw']),pose[1]+np.sin(info['d_yaw']),0.6], lineColorRGB=[1,0,1],lineWidth=2) return id def terminate(self,info): rot_mat = info["rot_mat"] pose = info["pose"] footposition = copy(info["footposition"]) footz = footposition[:,-1] base = info["base"] base_std = np.sum(np.std(self.last_base10,axis=0)) return rot_mat[-1]<0.5 or np.mean(footz)>-0.1 or np.max(footz)>0 or (base_std<=2e-4 and self.steps>=10) or abs(pose[-1])>0.6 def _calc_torque_reward(self): energy = self.robot.GetEnergyConsumptionPerControlStep() return -energy def re_still(self,info): v = (np.array(info["base"])-np.array(self.last_basepose))/0.026 return -np.linalg.norm(v) def re_standupright(self,info): still = self.re_still(info) up = self.re_up(info) return self.re_rot(info,still+up) def re_up(self,info): posex = info["base"][0] env_vec = np.zeros(7) for env_v in info["env_info"]: if posex+0.2 >= env_v[0] and posex+0.2 <= env_v[1]: env_vec = env_v[2] break pose = copy(info["pose"]) roll = pose[0] pitch = pose[1] if env_vec[0]: pitch += abs(env_vec[4]) elif env_vec[1]: pitch -= abs(env_vec[4]) r = np.sqrt(roll**2+pitch**2) return 1-self.c_prec(r,0,0.4) def re_rot(self,info,r): pose = copy(info["pose"]) yaw = pose[-1] k1 = 1-self.c_prec(yaw,info['d_yaw'],0.5) k2 = 1-self.c_prec(yaw,info['d_yaw']+2*np.pi,0.5) k3 = 1-self.c_prec(yaw,info['d_yaw']-2*np.pi,0.5) k = max(k1,k2,k3) return min(k*r,r) def c_prec(self,v,t,m): if m<1e-5: print(m) w = np.arctanh(np.sqrt(0.95))/m return np.tanh(np.power((v-t)*w,2)) def re_feet(self,info,vd=[1,0,0],last_footposition=None): vd[0] = np.cos(info['d_yaw']) vd[1] = np.sin(info['d_yaw']) posex = info["base"][0] env_vec = np.zeros(7) for env_v in info["env_info"]: if posex+0.2 >= env_v[0] and posex+0.2 <= env_v[1]: env_vec = env_v[2] break if env_vec[0]: vd[0] *= abs(np.cos(env_vec[4])) vd[1] *= abs(np.cos(env_vec[4])) vd[2] = abs(np.sin(env_vec[4])) elif env_vec[1]: vd[0] *= abs(np.cos(env_vec[4])) vd[1] *= abs(np.cos(env_vec[4])) vd[2] = -abs(np.sin(env_vec[4])) foot = self.get_foot_world(info) if last_footposition is None: d_foot = (foot-self.last_footposition)/0.026 else: d_foot = (foot-last_footposition)/0.026 v_sum = 0 contact = copy(info["real_contact"]) for i in range(4): v = d_foot[i] v_ = v[0]*vd[0]+v[1]*vd[1]+v[2]*vd[2] r = min(v_,self.vel_d)/4.0 v_sum += min(r,1.0*r) return self.re_rot(info,v_sum) def get_foot_world(self,info={}): if "footposition" in info.keys(): foot = np.array(info["footposition"]).transpose() rot_mat = np.array(info["rot_mat"]).reshape(-1,3) base = np.array(info["base"]).reshape(-1,1) else: foot = np.array(self.robot.GetFootPositionsInBaseFrame()).transpose() rot_quat = self.robot.GetBaseOrientation() rot_mat = np.array(self.pybullet_client.getMatrixFromQuaternion(rot_quat)).reshape(-1,3) base = np.array(self.robot.GetBasePosition()).reshape(-1,1) print("no!") foot_world = rot_mat.dot(foot)+base return foot_world.transpose() def re_torso(self,info,vd = [1,0,0],last_basepose = None): if last_basepose is None: v = (np.array(info["base"])-np.array(self.last_basepose))/0.026 else: v = (np.array(info["base"])-
np.array(last_basepose))/0.026
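# Standalone sketch of the rolling observation history that ObservationWrapper
# keeps when sensor_mode["RNN"]["mode"] == "stack": one past frame is kept per
# time_interval steps, the newest observation is appended, and the result is
# flattened into a single vector. The sizes below are made up for the demo.
import numpy as np

time_steps, time_interval, sensor_shape = 3, 2, 4
obs_history = np.zeros((time_steps * time_interval, sensor_shape))


def stack_obs(obs, obs_history):
    obs_list = [obs_history[t * time_interval].copy() for t in range(time_steps)]
    obs_list.append(obs.copy())
    obs_history[:-1] = obs_history[1:].copy()   # shift the buffer by one step
    obs_history[-1] = obs
    return np.array(obs_list).reshape(-1)       # ((time_steps + 1) * sensor_shape,)


for step in range(5):
    stacked = stack_obs(np.full(sensor_shape, float(step)), obs_history)
print(stacked.shape)  # (16,)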
from __future__ import division, print_function import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from mpl_toolkits.axes_grid1 import make_axes_locatable from mpl_toolkits.mplot3d import Axes3D import streakline #import streakline2 import myutils import ffwd from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior #import streams import astropy import astropy.units as u from astropy.constants import G from astropy.table import Table import astropy.coordinates as coord import gala.coordinates as gc import scipy.linalg as la import scipy.interpolate import scipy.optimize import zscale import itertools import copy import pickle # observers # defaults taken as in astropy v2.0 icrs mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')} vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')} vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} MASK = -9999 pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr] #pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr] class Stream(): def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'): """Initialize """ setup = {} if progenitor['coords']=='galactocentric': setup['x0'] = x0 setup['v0'] = v0 elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0): if progenitor['pm_polar']: a = v0[1].value phi = v0[2].value v0[1] = a*np.sin(phi)*u.mas/u.yr v0[2] = a*np.cos(phi)*u.mas/u.yr # convert positions xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer']) xgal = xeq.transform_to(coord.Galactocentric) setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc # convert velocities setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun) #setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s else: raise ValueError('Observer position needed!') setup['dr'] = dr setup['dv'] = dv setup['minit'] = minit setup['mfinal'] = mfinal setup['rcl'] = rcl setup['dt'] = dt setup['age'] = age setup['nstars'] = nstars setup['integrator'] = integrator setup['potential'] = potential setup['pparams'] 
= pparams self.setup = setup self.setup_aux = {} self.fill_intid() self.fill_potid() self.st_params = self.format_input() def fill_intid(self): """Assign integrator ID for a given integrator choice Assumes setup dictionary has an 'integrator' key""" if self.setup['integrator']=='lf': self.setup_aux['iaux'] = 0 elif self.setup['integrator']=='rk': self.setup_aux['iaux'] = 1 def fill_potid(self): """Assign potential ID for a given potential choice Assumes d has a 'potential' key""" if self.setup['potential']=='nfw': self.setup_aux['paux'] = 3 elif self.setup['potential']=='log': self.setup_aux['paux'] = 2 elif self.setup['potential']=='point': self.setup_aux['paux'] = 0 elif self.setup['potential']=='gal': self.setup_aux['paux'] = 4 elif self.setup['potential']=='lmc': self.setup_aux['paux'] = 6 elif self.setup['potential']=='dipole': self.setup_aux['paux'] = 8 elif self.setup['potential']=='quad': self.setup_aux['paux'] = 9 elif self.setup['potential']=='octu': self.setup_aux['paux'] = 10 def format_input(self): """Format input parameters for streakline.stream""" p = [None]*12 # progenitor position p[0] = self.setup['x0'].si.value p[1] = self.setup['v0'].si.value # potential parameters p[2] = [x.si.value for x in self.setup['pparams']] # stream smoothing offsets p[3] = [self.setup['dr'], self.setup['dv'].si.value] # potential and integrator choice p[4] = self.setup_aux['paux'] p[5] = self.setup_aux['iaux'] # number of steps and stream stars p[6] = int(self.setup['age']/self.setup['dt']) p[7] = int(p[6]/self.setup['nstars']) # cluster properties p[8] = self.setup['minit'].si.value p[9] = self.setup['mfinal'].si.value p[10] = self.setup['rcl'].si.value # time step p[11] = self.setup['dt'].si.value return p def generate(self): """Create streakline model for a stream of set parameters""" #xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p) stream = streakline.stream(*self.st_params) self.leading = {} self.leading['x'] = stream[:3]*u.m self.leading['v'] = stream[6:9]*u.m/u.s self.trailing = {} self.trailing['x'] = stream[3:6]*u.m self.trailing['v'] = stream[9:12]*u.m/u.s def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None): """Observe the stream stream.obs holds all observations stream.err holds all errors""" x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s if mode=='cartesian': # returns coordinates in following order # x(x, y, z), v(vx, vy, vz) if len(units)<2: units.append(self.trailing['x'].unit) units.append(self.trailing['v'].unit) if len(errors)<2: errors.append(0.2*u.kpc) errors.append(2*u.km/u.s) # positions x = x.to(units[0]) ex = np.ones(np.shape(x))*errors[0] ex = ex.to(units[0]) # velocities v = v.to(units[1]) ev = np.ones(np.shape(v))*errors[1] ev = ev.to(units[1]) self.obs = np.concatenate([x,v]).value self.err = np.concatenate([ex,ev]).value elif mode=='equatorial': # assumes coordinates in the following order: # ra, dec, distance, vrad, mualpha, mudelta if len(units)!=6: units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr] if len(errors)!=6: errors = [0.2*u.deg, 
0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr] # define reference frame xgal = coord.Galactocentric(x, **observer) #frame = coord.Galactocentric(**observer) # convert xeq = xgal.transform_to(coord.ICRS) veq = gc.vgal_to_hel(xeq, v, **vobs) # store coordinates ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])] vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])] obs = np.hstack([ra, dec, dist, vr, mua, mud]).value obs = np.reshape(obs,(6,-1)) if footprint=='sdss': infoot = dec > -2.5*u.deg obs = obs[:,infoot] if np.allclose(rotmatrix, np.eye(3))!=1: xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix) obs[0] = xi obs[1] = eta self.obs = obs # store errors err = np.ones(np.shape(self.obs)) if logerr: for i in range(6): err[i] *= np.exp(errors[i].to(units[i]).value) else: for i in range(6): err[i] *= errors[i].to(units[i]).value self.err = err self.obsunit = units self.obserror = errors # randomly select nstars from the stream if nstars>-1: if sequential: select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int) else: select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars) self.obs = self.obs[:,select] self.err = self.err[:,select] # include only designated dimensions if len(present)>0: self.obs = self.obs[present] self.err = self.err[present] self.obsunit = [ self.obsunit[x] for x in present ] self.obserror = [ self.obserror[x] for x in present ] def prog_orbit(self): """Generate progenitor orbital history""" orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1) self.orbit = {} self.orbit['x'] = orbit[:3]*u.m self.orbit['v'] = orbit[3:]*u.m/u.s def project(self, name, N=1000, nbatch=-1): """Project the stream from observed to native coordinates""" poly = np.loadtxt("../data/{0:s}_all.txt".format(name)) self.streak = np.poly1d(poly) self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N) self.streak_y = np.polyval(self.streak, self.streak_x) self.streak_b = np.zeros(N) self.streak_l = np.zeros(N) pdot = np.polyder(poly) for i in range(N): length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,)) self.streak_l[i] = length[0] XB = np.transpose(np.vstack([self.streak_x, self.streak_y])) n = np.shape(self.obs)[1] if nbatch<0: nstep = 0 nbatch = -1 else: nstep = np.int(n/nbatch) i1 = 0 i2 = nbatch for i in range(nstep): XA = np.transpose(np.vstack([np.array(self.obs[0][i1:i2]), np.array(self.obs[1][i1:i2])])) self.emdist(XA, XB, i1=i1, i2=i2) i1 += nbatch i2 += nbatch XA = np.transpose(np.vstack([np.array(self.catalog['ra'][i1:]), np.array(self.catalog['dec'][i1:])])) self.emdist(XA, XB, i1=i1, i2=n) #self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header') def emdist(self, XA, XB, i1=0, i2=-1): """""" distances = scipy.spatial.distance.cdist(XA, XB) self.catalog['b'][i1:i2] = np.min(distances, axis=1) imin = np.argmin(distances, axis=1) self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[imin]] *= -1 self.catalog['l'][i1:i2] = self.streak_l[imin] def _delta_path(self, x, pdot): """Return integrand for calculating length of a path along a polynomial""" return np.sqrt(1 + np.polyval(pdot, x)**2) def plot(self, mode='native', fig=None, color='k', **kwargs): """Plot stream""" # Plotting if fig==None: plt.close() plt.figure() 
ax = plt.axes([0.12,0.1,0.8,0.8]) if mode=='native': # Color setup cindices = np.arange(self.setup['nstars']) # colors of stream particles nor = mpl.colors.Normalize(vmin=0, vmax=self.setup['nstars']) # colormap normalization plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3) plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1) plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1) plt.xlabel("X (kpc)") plt.ylabel("Z (kpc)") elif mode=='observed': plt.subplot(221) plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs) plt.xlabel("RA") plt.ylabel("Dec") plt.subplot(223) plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs) plt.xlabel("RA") plt.ylabel("Distance") plt.subplot(222) plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs) plt.xlabel("V$_r$") plt.ylabel("$\mu\\alpha$") plt.subplot(224) plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs) plt.xlabel("V$_r$") plt.ylabel("$\mu\delta$") plt.tight_layout() #plt.minorticks_on() def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}): """Read stream star positions from a file""" t = np.loadtxt(fname).T n = np.shape(t)[1] ns = int((n-1)/2) self.setup['nstars'] = ns # progenitor self.setup['x0'] = t[:3,0] * units['x'] self.setup['v0'] = t[3:,0] * units['v'] # leading tail self.leading = {} self.leading['x'] = t[:3,1:ns+1] * units['x'] self.leading['v'] = t[3:,1:ns+1] * units['v'] # trailing tail self.trailing = {} self.trailing['x'] = t[:3,ns+1:] * units['x'] self.trailing['v'] = t[3:,ns+1:] * units['v'] def save(self, fname): """Save stream star positions to a file""" # define table t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz')) # add progenitor info t.add_row(np.ravel([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value])) # add leading tail infoobsmode tt = Table(np.concatenate((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz')) t = astropy.table.vstack([t,tt]) # add trailing tail info tt = Table(np.concatenate((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz')) t = astropy.table.vstack([t,tt]) # save to file t.write(fname, format='ascii.commented_header') # make a streakline model of a stream def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'): """Create a streakline model of a stream baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc""" # vary progenitor parameters mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) for i in range(3): mock['x0'][i] += pparams0[26+i] mock['v0'][i] += pparams0[29+i] # vary potential parameters potential = 'octu' pparams = pparams0[:26] #print(pparams[0]) pparams[0] = (10**pparams0[0].value)*pparams0[0].unit pparams[2] = (10**pparams0[2].value)*pparams0[2].unit #pparams[0] = pparams0[0]*1e15 #pparams[2] = pparams0[2]*1e15 #print(pparams[0]) # adjust circular velocity in this halo vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams) # create a model stream with these parameters params = 
{'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}} stream = Stream(**params['generate']) stream.generate() stream.observe(**params['observe']) ################################ # Plot observed stream and model if graph: observed = load_stream(name) Ndim = np.shape(observed.obs)[0] modcol = 'k' obscol = 'orange' ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)'] plt.close() fig, ax = plt.subplots(1, 3, figsize=(12,4)) for i in range(3): plt.sca(ax[i]) plt.gca().invert_xaxis() plt.xlabel('R.A. (deg)') plt.ylabel(ylabel[i]) plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream') plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model') if i==0: plt.legend(frameon=False, handlelength=0.5, fontsize='small') plt.tight_layout() if graphsave: plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150) return stream def progenitor_params(n): """Return progenitor parameters for a given stream""" if n==-1: age = 1.6*u.Gyr mi = 1e4*u.Msun mf = 2e-1*u.Msun x0, v0 = gd1_coordinates(observer=mw_observer) elif n==-2: age = 2.7*u.Gyr mi = 1e5*u.Msun mf = 2e4*u.Msun x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0) elif n==-3: age = 3.5*u.Gyr mi = 5e4*u.Msun mf = 2e-1*u.Msun x0, v0 = tri_coordinates(observer=mw_observer) elif n==-4: age = 2*u.Gyr mi = 2e4*u.Msun mf = 2e-1*u.Msun x0, v0 = atlas_coordinates(observer=mw_observer) out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf} return out def gal2eq(x, v, observer=mw_observer, vobs=vsun0): """""" # define reference frame xgal = coord.Galactocentric(np.array(x)[:,np.newaxis]*u.kpc, **observer) # convert xeq = xgal.transform_to(coord.ICRS) veq = gc.vgal_to_hel(xeq, np.array(v)[:,np.newaxis]*u.km/u.s, **vobs) # store coordinates units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr] xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])] vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])] return(xobs, vobs) def gd1_coordinates(observer=mw_observer): """Approximate GD-1 progenitor coordinates""" x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer) x_ = x.galactocentric x0 = [x_.x.value, x_.y.value, x_.z.value] v0 = [-90, -250, -120] return (x0, v0) def pal5_coordinates(observer=mw_observer, vobs=vsun0): """Pal5 coordinates""" # sdss ra = 229.0128*u.deg dec = -0.1082*u.deg # bob's rrlyrae d = 21.7*u.kpc # harris #d = 23.2*u.kpc # odenkirchen 2002 vr = -58.7*u.km/u.s # fritz & kallivayalil 2015 mua = -2.296*u.mas/u.yr mud = -2.257*u.mas/u.yr d = 24*u.kpc x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer) x0 = x.galactocentric v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s) return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist()) def tri_coordinates(observer=mw_observer): """Approximate Triangulum 
progenitor coordinates""" x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer) x_ = x.galactocentric x0 = [x_.x.value, x_.y.value, x_.z.value] v0 = [-40, 155, 155] return (x0, v0) def atlas_coordinates(observer=mw_observer): """Approximate ATLAS progenitor coordinates""" x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer) x_ = x.galactocentric x0 = [x_.x.value, x_.y.value, x_.z.value] v0 = [40, 150, -120] return (x0, v0) # great circle orientation def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True): """Save rotation matrix for a stream model""" if stream==None: stream = stream_model(name, pparams0=pparams, dt=dt) # find the pole ra = np.radians(stream.obs[0]) dec = np.radians(stream.obs[1]) rx = np.cos(ra) * np.cos(dec) ry = np.sin(ra) * np.cos(dec) rz = np.sin(dec) r = np.column_stack((rx, ry, rz)) # fit the plane x0 = np.array([0, 1, 0]) lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,)) x0 = lsq.x/np.linalg.norm(lsq.x) ra0 = np.arctan2(x0[1], x0[0]) dec0 = np.arcsin(x0[2]) ra0 += np.pi dec0 = np.pi/2 - dec0 # euler rotations R0 = myutils.rotmatrix(np.degrees(-ra0), 2) R1 = myutils.rotmatrix(np.degrees(dec0), 1) R2 = myutils.rotmatrix(0, 2) R = np.dot(R2, np.matmul(R1, R0)) xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R) # put xi = 50 at the beginning of the stream xi[xi>180] -= 360 xi += 360 xi0 = np.min(xi) - 50 R2 = myutils.rotmatrix(-xi0, 2) R = np.dot(R2, np.matmul(R1, R0)) xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R) if save: np.save('../data/rotmatrix_{}'.format(name), R) f = open('../data/mock_{}.params'.format(name), 'rb') mock = pickle.load(f) mock['rotmatrix'] = R f.close() f = open('../data/mock_{}.params'.format(name), 'wb') pickle.dump(mock, f) f.close() if graph: plt.close() fig, ax = plt.subplots(1,2,figsize=(10,5)) plt.sca(ax[0]) plt.plot(stream.obs[0], stream.obs[1], 'ko') plt.xlabel('R.A. 
def wfit_plane(x, r, p=None):
    """Fit a plane to a set of 3d points"""
    
    Np = np.shape(r)[0]
    if p is None:
        p = np.ones(Np)
    
    Q = np.zeros((3,3))
    
    for i in range(Np):
        Q += p[i]**2 * np.outer(r[i], r[i])
    
    x = x/np.linalg.norm(x)
    lsq = np.inner(x, np.inner(Q, x))
    
    return lsq
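# Minimal sketch of the plane fit performed in find_greatcircle: place points
# on a synthetic great circle with a known pole, minimize wfit_plane, and
# recover the pole (up to sign, since the objective is even in x).
def _example_wfit_plane():
    """Recover the pole of a synthetic great circle with wfit_plane"""
    np.random.seed(59)
    pole = np.array([0.3, -0.5, 0.8])
    pole = pole/np.linalg.norm(pole)
    
    # random unit vectors in the plane orthogonal to the pole
    v = np.random.randn(100, 3)
    v -= np.outer(np.dot(v, pole), pole)
    r = v/np.linalg.norm(v, axis=1)[:,np.newaxis]
    
    lsq = scipy.optimize.minimize(wfit_plane, np.array([0, 1, 0]), args=(r,))
    x0 = lsq.x/np.linalg.norm(lsq.x)
    print('recovered pole:', x0, 'true pole:', pole)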
# observed streams

#def load_stream(n):
    #"""Load stream observations"""
    
    #if n==-1:
        #observed = load_gd1(present=[0,1,2,3])
    #elif n==-2:
        #observed = load_pal5(present=[0,1,2,3])
    #elif n==-3:
        #observed = load_tri(present=[0,1,2,3])
    #elif n==-4:
        #observed = load_atlas(present=[0,1,2,3])
    
    #return observed

def endpoints(name):
    """Store a stream's endpoints, in equatorial and rotated coordinates, in its mock parameter file"""
    
    stream = load_stream(name)
    
    # find endpoints
    amin = np.argmin(stream.obs[0])
    amax = np.argmax(stream.obs[0])
    ra = np.array([stream.obs[0][i] for i in [amin, amax]])
    dec = np.array([stream.obs[1][i] for i in [amin, amax]])
    
    f = open('../data/mock_{}.params'.format(name), 'rb')
    mock = pickle.load(f)
    
    # rotate endpoints
    R = mock['rotmatrix']
    xi, eta = myutils.rotate_angles(ra, dec, R)
    #xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
    mock['ra_range'] = ra
    mock['xi_range'] = xi #np.percentile(xi, [10,90])
    f.close()
    
    f = open('../data/mock_{}.params'.format(name), 'wb')
    pickle.dump(mock, f)
    f.close()

def load_pal5(present, nobs=50, potential='gal'):
    """Load Pal 5 observations for a given set of available dimensions"""
    
    if len(present)==2:
        t = Table.read('../data/pal5_members.txt', format='ascii.commented_header')
        dist = 21.7
        deltadist = 0.7
        np.random.seed(34)
        t = t[np.random.randint(0, high=len(t), size=nobs)]
        nobs = len(t)
        
        d = np.random.randn(nobs)*deltadist + dist
        
        obs = np.array([t['ra'], t['dec'], d])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.repeat( np.array([2e-4, 2e-4, 0.7]), nobs ).reshape(3, -1)
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    
    if len(present)==3:
        #t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
        t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d']])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    
    if len(present)==4:
        #t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
        t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
        obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    
    observed = Stream(potential=potential)
    observed.obs = obs
    observed.obsunit = obsunit
    observed.err = err
    observed.obserror = obserr
    
    return observed

def load_gd1(present, nobs=50, potential='gal'):
    """Load GD-1 observations for a given set of available dimensions"""
    
    if len(present)==3:
        t = Table.read('../data/gd1_members.txt', format='ascii.commented_header')
        dist = 0
        deltadist = 0.5
        np.random.seed(34)
        t = t[np.random.randint(0, high=len(t), size=nobs)]
        nobs = len(t)
        
        d = np.random.randn(nobs)*deltadist + dist
        d += t['l']*0.04836 + 9.86
        
        obs = np.array([t['ra'], t['dec'], d])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.repeat( np.array([2e-4, 2e-4, 0.5]), nobs ).reshape(3, -1)
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    
    if len(present)==4:
        #t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header')
        t = Table.read('../data/gd1_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
        obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    
    ind = np.all(obs!=MASK, axis=0)
    
    observed = Stream(potential=potential)
    observed.obs = obs#[np.array(present)]
    observed.obsunit = obsunit
    observed.err = err#[np.array(present)]
    observed.obserror = obserr
    
    return observed

def load_tri(present, nobs=50, potential='gal'):
    """Load Triangulum observations for a given set of available dimensions"""
    
    if len(present)==4:
        t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
        obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    
    if len(present)==3:
        t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d']])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    
    ind = np.all(obs!=MASK, axis=0)
    
    observed = Stream(potential=potential)
    observed.obs = obs
    observed.obsunit = obsunit
    observed.err = err
    observed.obserror = obserr
    
    return observed

def load_atlas(present, nobs=50, potential='gal'):
    """Load ATLAS observations along its great-circle track"""
    
    ra, dec = atlas_track()
    n = np.size(ra)
    d = np.random.randn(n)*2 + 20
    
    obs = np.array([ra, dec, d])
    obsunit = [u.deg, u.deg, u.kpc]
    err = np.array([np.ones(n)*0.05, np.ones(n)*0.05, np.ones(n)*2])
    obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    
    observed = Stream(potential=potential)
    observed.obs = obs
    observed.obsunit = obsunit
    observed.err = err
    observed.obserror = obserr
    
    return observed

def atlas_track():
    """Return RA and Dec along the ATLAS stream's great-circle track"""
    
    ra0, dec0 = np.radians(77.16), np.radians(46.92 - 90)
    
    # euler rotations
    D = np.array([[np.cos(ra0), np.sin(ra0), 0], [-np.sin(ra0), np.cos(ra0), 0], [0, 0, 1]])
    C = np.array([[np.cos(dec0), 0, np.sin(dec0)], [0, 1, 0], [-np.sin(dec0), 0, np.cos(dec0)]])
    B = np.diag(np.ones(3))
    
    R = np.dot(B, np.dot(C, D))
    Rinv = np.linalg.inv(R)
    
    l0 = np.linspace(0, 2*np.pi, 500)
    b0 = np.zeros(500)
    
    xeq, yeq, zeq = myutils.eq2car(l0, b0)
    eq = np.column_stack((xeq, yeq, zeq))
    
    eq_rot = np.zeros(np.shape(eq))
    for i in range(np.size(l0)):
        eq_rot[i] = np.dot(Rinv, eq[i])
    
    l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2])
    ra_s, dec_s = np.degrees(l0_rot), np.degrees(b0_rot)
    ind_s = (ra_s>17) & (ra_s<30)
    ra_s = ra_s[ind_s]
    dec_s = dec_s[ind_s]
    
    return (ra_s, dec_s)

def fancy_name(n):
    """Return nicely formatted stream name"""
    names = {-1: 'GD-1', -2: 'Palomar 5', -3: 'Triangulum', -4: 'ATLAS'}
    
    return names[n]


# model parameters

def get_varied_pars(vary):
    """Return indices and steps for a preset of varied parameters, and a label for varied parameters
    Parameters:
    vary - string setting the parameter combination to be varied, options: 'potential', 'progenitor', 'bary', 'halo', 'dipole', 'quad', 'octu', or a list thereof"""
    
    if type(vary) is not list:
        vary = [vary]
    
    Nt = len(vary)
    vlabel = '_'.join(vary)
    
    pid = []
    dp = []
    
    for v in vary:
        o1, o2 = get_varied_bytype(v)
        pid += o1
        dp += o2
    
    return (pid, dp, vlabel)
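# Example (illustrative): assemble indices, fiducial step sizes and a label
# for a combined progenitor + halo preset, the form consumed by the
# derivative and CRB machinery below.
def _example_varied_pars():
    """List the parameters varied in a progenitor + halo preset"""
    pid, dp, vlabel = get_varied_pars(['progenitor', 'halo'])
    labels, units = get_parlabel(pid)
    print(vlabel)    # 'progenitor_halo'
    for i, l, d in zip(pid, labels, dp):
        print(i, l, d)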
def get_varied_bytype(vary):
    """Get varied parameter of a particular type"""
    if vary=='potential':
        pid = [5,6,8,10,11]
        dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun]
    elif vary=='bary':
        pid = [0,1,2,3,4]
        # gd1
        dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc]
        ## atlas & triangulum
        #dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc]
        # pal5
        dp = [1e-2*u.Msun, 0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc]
        dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
        dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
    elif vary=='halo':
        pid = [5,6,8,10]
        dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
        dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
    elif vary=='progenitor':
        pid = [26,27,28,29,30,31]
        dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr]
    elif vary=='dipole':
        pid = [11,12,13]
        #dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)]
        dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2]
    elif vary=='quad':
        pid = [14,15,16,17,18]
        dp = [0.5*u.Gyr**-2 for x in range(5)]
    elif vary=='octu':
        pid = [19,20,21,22,23,24,25]
        dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)]
    else:
        pid = []
        dp = []
    
    return (pid, dp)

def get_parlabel(pid):
    """Return label for a list of parameter ids
    Parameter:
    pid - list of parameter ids"""
    
    master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$',
              '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$',
              '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$',
              '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$',
              '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$',
              '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ]
    master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc',
                    'km/s', 'kpc', 'rad', '', '', '',
                    'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$',
                    'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$',
                    'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$',
                    'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ]
    
    if type(pid) is list:
        labels = []
        units = []
        
        for i in pid:
            labels += [master[i]]
            units += [master_units[i]]
    else:
        labels = master[pid]
        units = master_units[pid]
    
    return (labels, units)

def get_steps(Nstep=50, log=False):
    """Return deltax steps in both directions
    Parameters:
    Nstep - number of steps in one direction (default: 50)
    log - if True, steps are logarithmically spaced (default: False)"""
    
    if log:
        step = np.logspace(-10, 1, Nstep)
    else:
        step = np.linspace(0.1, 10, Nstep)
    
    step = np.concatenate([-step[::-1], step])
    
    return (Nstep, step)

def lmc_position():
    """Print the Galactocentric cartesian position of the LMC"""
    ra = 80.8939*u.deg
    dec = -69.7561*u.deg
    dm = 18.48
    d = 10**(1 + dm/5)*u.pc
    
    x = coord.SkyCoord(ra=ra, dec=dec, distance=d)
    xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si]
    print(xgal)

def lmc_properties():
    """Return the mass and Galactocentric position of the LMC"""
    # penarrubia 2016
    mass = 2.5e11*u.Msun
    ra = 80.8939*u.deg
    dec = -69.7561*u.deg
    dm = 18.48
    d = 10**(1 + dm/5)*u.pc
    
    c1 = coord.SkyCoord(ra=ra, dec=dec, distance=d)
    cgal1 = c1.transform_to(coord.Galactocentric)
    xgal = np.array([cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value])*u.kpc
    
    return (mass, xgal)
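# The step grids returned by get_steps are symmetric about zero, e.g. for
# three linear steps:
# >>> get_steps(Nstep=3)
# (3, array([-10.  , -5.05, -0.1 ,  0.1 ,  5.05, 10.  ]))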
# fit bspline to a stream model

def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
    """Fit bspline to a stream model and save to file"""
    
    Ndim = 6
    fits = [None]*(Ndim-1)
    
    if align:
        rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
    else:
        rotmatrix = None
    
    stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
    
    Nobs = 10
    k = 3
    isort = np.argsort(stream.obs[0])
    ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
    t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
    
    for j in range(Ndim-1):
        fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
    
    if len(save)>0:
        np.savez('../data/{:s}'.format(save), fits=fits)
    
    if graph:
        xlims, ylims = get_stream_limits(n, align)
        ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
        if align:
            ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
        
        if fiducial:
            stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
            fidsort = np.argsort(stream_fid.obs[0])
            ra = np.linspace(np.min(stream_fid.obs[0])*1.05, np.max(stream_fid.obs[0])*0.95, Nobs)
            tfid = np.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)]
            llabel = 'b-spline fit'
        else:
            llabel = ''
        
        plt.close()
        fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
        
        for i in range(Ndim-1):
            plt.sca(ax[0][i])
            plt.plot(stream.obs[0], stream.obs[i+1], 'ko')
            plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel)
            
            if fiducial:
                fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k)
                plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1)
                plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial')
            
            plt.ylabel(ylabel[i+1])
            plt.xlim(xlims[0], xlims[1])
            plt.ylim(ylims[i][0], ylims[i][1])
            
            plt.sca(ax[1][i])
            if fiducial:
                yref = fits_fid(stream.obs[0])
                ycolor = 'b'
            else:
                yref = fits[i](stream.obs[0])
                ycolor = 'r'
            plt.axhline(0, color=ycolor, lw=2)
            
            if fiducial:
                plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1)
            plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko')
            
            if fiducial:
                fits_diff = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k)
                plt.plot(stream.obs[0][isort], fits_diff(stream.obs[0][isort]), 'r--')
            plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel)
            
            plt.xlabel(ylabel[0])
            plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].split(' ')[0]))
        
        if fiducial:
            plt.sca(ax[0][Ndim-2])
            plt.legend(fontsize='small')
        
        plt.tight_layout()
        if len(graphsave)>0:
            plt.savefig('../plots/{:s}.png'.format(graphsave))
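# A self-contained sketch of the clamped-knot least-squares spline
# construction used throughout this module: (k+1)-fold boundary knots plus
# interior knots spanning the bulk of the data (synthetic data, for
# illustration only).
def _example_lsq_spline():
    """Fit a cubic least-squares B-spline to noisy data"""
    np.random.seed(47)
    x = np.sort(np.random.uniform(0, 10, 200))
    y = np.sin(x) + np.random.randn(200)*0.1
    
    k = 3
    interior = np.linspace(np.percentile(x, 10), np.percentile(x, 90), 10)
    t = np.r_[(x[0],)*(k+1), interior, (x[-1],)*(k+1)]
    
    fit = scipy.interpolate.make_lsq_spline(x, y, t, k=k)
    print('rms residual (expect ~0.1):', np.std(y - fit(x)))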
def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
    """Fit each tail individually"""
    
    Ndim = 6
    fits = [None]*(Ndim-1)
    
    if align:
        rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
    else:
        rotmatrix = None
    
    stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
    
    Nobs = 10
    k = 3
    isort = np.argsort(stream.obs[0])
    ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
    t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
    
    for j in range(Ndim-1):
        fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
    
    if len(save)>0:
        np.savez('../data/{:s}'.format(save), fits=fits)
    
    if graph:
        xlims, ylims = get_stream_limits(n, align)
        ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
        if align:
            ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
        
        if fiducial:
            stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
        
        plt.close()
        fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
        
        for i in range(Ndim):
            plt.sca(ax[0][i])
            Nhalf = int(0.5*np.size(stream.obs[i]))
            plt.plot(stream.obs[i][:Nhalf], 'o')
            plt.plot(stream.obs[i][Nhalf:], 'o')
            
            if fiducial:
                plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5)
                plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5)
            
            plt.ylabel(ylabel[i])
            
            plt.sca(ax[1][i])
            if fiducial:
                plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o')
                plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o')
        
        if fiducial:
            plt.sca(ax[0][Ndim-1])
            plt.legend(fontsize='small')
        
        plt.tight_layout()
        if len(graphsave)>0:
            plt.savefig('../plots/{:s}.png'.format(graphsave))
        else:
            return fig

def get_stream_limits(n, align=False):
    """Return lists with limiting values in different dimensions"""
    if n==-1:
        xlims = [260, 100]
        ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]]
    elif n==-2:
        xlims = [250, 210]
        ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]]
    elif n==-3:
        xlims = [27, 17]
        ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]]
    elif n==-4:
        xlims = [35, 10]
        ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]]
    
    if align:
        ylims[0] = [-5, 5]
        xup = [110, 110, 80, 80]
        xlims = [xup[np.abs(n)-1], 40]
    
    return (xlims, ylims)


# step sizes for derivatives

def iterate_steps(n):
    """Calculate derivatives for different parameter classes, and plot"""
    
    for vary in ['bary', 'halo', 'progenitor']:
        print(n, vary)
        step_convergence(n, Nstep=10, vary=vary)
        choose_step(n, Nstep=10, vary=vary)

def iterate_plotsteps(n):
    """Plot stream models for a variety of model parameters"""
    
    for vary in ['bary', 'halo', 'progenitor']:
        print(n, vary)
        pid, dp, vlabel = get_varied_pars(vary)
        
        for p in range(len(pid)):
            plot_steps(n, p=p, Nstep=5, vary=vary, log=False)
def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun):
    """Plot stream for different values of a potential parameter"""
    
    if align:
        rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
    else:
        rotmatrix = None
    
    pparams0 = pparams_fid
    pid, dp, vlabel = get_varied_pars(vary)
    plabel, punit = get_parlabel(pid[p])
    
    Nstep, step = get_steps(Nstep=Nstep, log=log)
    
    plt.close()
    fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]})
    
    # fiducial model
    stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs)
    
    Nobs = 10
    k = 3
    isort = np.argsort(stream0.obs[0])
    ra = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, Nobs)
    t = np.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)]
    fits = [None]*5
    
    for j in range(5):
        fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k)
    
    # excursions
    stream_fits = [[None] * 5 for x in range(2 * Nstep)]
    
    for i, s in enumerate(step[:]):
        pparams = [x for x in pparams0]
        pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
        stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
        color = mpl.cm.RdBu(i/(2*Nstep-1))
        #print(i, dp[p], pparams)
        
        # fits
        iexsort = np.argsort(stream.obs[0])
        raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
        tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
        fits_ex = [None]*5
        
        for j in range(5):
            fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
            stream_fits[i][j] = fits_ex[j]
            
            plt.sca(ax[0][j])
            plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2)
            
            plt.sca(ax[1][j])
            plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2)
            
            plt.sca(ax[2][j])
            plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2)
            
            plt.sca(ax[3][j])
            plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2)
    
    # symmetric derivatives
    ra_der = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, 100)
    for i in range(Nstep):
        color = mpl.cm.Greys_r(i/Nstep)
        for j in range(5):
            dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
            dydx = -dy / np.abs(2*step[i]*dp[p])
            
            plt.sca(ax[4][j])
            plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i)
    
    # labels, limits
    xlims, ylims = get_stream_limits(n, align)
    ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
    if align:
        ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
    
    for j in range(5):
        plt.sca(ax[0][j])
        plt.ylabel(ylabel[j+1])
        plt.xlim(xlims[0], xlims[1])
        plt.ylim(ylims[j][0], ylims[j][1])
        
        plt.sca(ax[1][j])
        plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
        
        plt.sca(ax[2][j])
        plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
        
        plt.sca(ax[3][j])
        plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].split(' ')[0], plabel))
        
        plt.sca(ax[4][j])
        plt.xlabel(ylabel[0])
        plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].split(' ')[0], plabel))
    
    #plt.suptitle('Varying {}'.format(plabel), fontsize='small')
    plt.tight_layout()
    plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep))

def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=np.nan, Nra=50):
    """Check deviations in numerical derivatives for consecutive step sizes"""
    
    mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
    if align:
        rotmatrix = mock['rotmatrix']
        xmm = mock['xi_range']
    else:
        rotmatrix = np.eye(3)
        xmm = mock['ra_range']
    
    # fiducial model
    pparams0 = pparams_fid
    stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
    
    if np.any(~np.isfinite(ra_der)):
        ra_der = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra)
    Nra = np.size(ra_der)
    
    # parameters to vary
    pid, dp, vlabel = get_varied_pars(vary)
    Np = len(pid)
    dpvec = np.array([x.value for x in dp])
    
    Nstep, step = get_steps(Nstep=Nstep, log=log)
    dydx_all = np.empty((Np, Nstep, 5, Nra))
    dev_der = np.empty((Np, Nstep-2*layer))
    step_der = np.empty((Np, Nstep-2*layer))
    
    for p in range(Np):
        plabel = get_parlabel(pid[p])
        if verbose: print(p, plabel)
        
        # excursions
        stream_fits = [[None] * 5 for x in range(2 * Nstep)]
        
        for i, s in enumerate(step[:]):
            if verbose: print(i, s)
            pparams = [x for x in pparams0]
            pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
            stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
            
            # fits
            iexsort = np.argsort(stream.obs[0])
            raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
            tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
            fits_ex = [None]*5
            
            for j in range(5):
                fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
                stream_fits[i][j] = fits_ex[j]
        
        # symmetric derivatives
        dydx = np.empty((Nstep, 5, Nra))
        
        for i in range(Nstep):
            color = mpl.cm.Greys_r(i/Nstep)
            for j in range(5):
                dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
                dydx[i][j] = -dy / np.abs(2*step[i]*dp[p])
        
        dydx_all[p] = dydx
        
        # deviations from adjacent steps
        step_der[p] = -step[layer:Nstep-layer] * dp[p]
        
        for i in range(layer, Nstep-layer):
            dev_der[p][i-layer] = 0
            for j in range(5):
                for l in range(layer):
                    dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i-l-1][j])**2)
                    dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i+l+1][j])**2)
    
    np.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_all, steps_all=np.outer(dpvec,step[Nstep:]))
    
    if graph:
        plt.close()
        fig, ax = plt.subplots(1,Np,figsize=(4*Np,4))
        
        for p in range(Np):
            plt.sca(ax[p])
            plt.plot(step_der[p], dev_der[p], 'ko')
            
            #plabel = get_parlabel(pid[p])
            #plt.xlabel('$\Delta$ {}'.format(plabel))
            plt.ylabel('D')
            plt.gca().set_yscale('log')
        
        plt.tight_layout()
        plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
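# The derivative estimator in plot_steps and step_convergence is a plain
# central difference evaluated from the spline fits at +/- s*dp (the minus
# sign compensates for the ordering of the step array); in miniature:
def _example_central_difference():
    """Central-difference derivative of a known function"""
    f = lambda p: p**3
    p0, dp = 2., 0.1
    dydp = (f(p0 + dp) - f(p0 - dp)) / (2*dp)
    print(dydp, 'vs exact', 3*p0**2)    # 12.01 vs 12.0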
def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'):
    """Choose the optimal step size for numerical derivatives of each varied parameter"""
    
    pid, dp, vlabel = get_varied_pars(vary)
    Np = len(pid)
    plabels, units = get_parlabel(pid)
    punits = ['({})'.format(x) if len(x) else '' for x in units]
    
    t = np.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.npz'.format(name, vlabel, Nstep, log, layer))
    dev = t['dev']
    step = t['step']
    dydx = t['ders']
    steps_all = t['steps_all'][:,::-1]
    Nra = np.shape(dydx)[-1]
    
    best = np.empty(Np)
    
    # plot setup
    da = 4
    nrow = 2
    ncol = Np
    
    plt.close()
    fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), squeeze=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]})
    
    for p in range(Np):
        # choose step
        dmin = np.min(dev[p])
        dtol = tolerance * dmin
        opt_step = np.min(step[p][dev[p]<dtol])
        opt_id = step[p]==opt_step
        best[p] = opt_step
        
        ## largest step w deviation smaller than 1e-4
        #opt_step = np.max(step[p][dev[p]<1e-4])
        #opt_id = step[p]==opt_step
        #best[p] = opt_step
        
        plt.sca(ax[0][p])
        for i in range(5):
            for j in range(10):
                plt.plot(steps_all[p], np.tanh(dydx[p,:,i,np.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5)
        
        plt.axvline(opt_step, ls='-', color='r', lw=2)
        plt.ylim(-1,1)
        
        plt.ylabel('Derivative')
        plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='small')
        
        plt.sca(ax[1][p])
        plt.plot(step[p], dev[p], 'ko')
        
        plt.axvline(opt_step, ls='-', color='r', lw=2)
        plt.plot(step[p][opt_id], dev[p][opt_id], 'ro')
        
        plt.axhline(dtol, ls='-', color='orange', lw=1)
        y0, y1 = plt.gca().get_ylim()
        plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0)
        
        plt.gca().set_yscale('log')
        plt.gca().set_xscale('log')
        plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p]))
        plt.ylabel('Derivative deviation')
    
    np.save('../data/optimal_step_{}_{}'.format(name, vlabel), best)
    
    plt.tight_layout(h_pad=0)
    plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))

def read_optimal_step(name, vary, equal=False):
    """Return optimal steps for a range of parameter types"""
    
    if type(vary) is not list:
        vary = [vary]
    
    dp = np.empty(0)
    
    for v in vary:
        dp_opt = np.load('../data/optimal_step_{}_{}.npy'.format(name, v))
        dp = np.concatenate([dp, dp_opt])
    
    if equal:
        dp = np.array([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01])
    
    return dp
def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3):
    """Plot stream models perturbed by the optimal step in each varied parameter"""
    
    mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
    if align:
        rotmatrix = mock['rotmatrix']
        xmm = mock['xi_range']
    else:
        rotmatrix = np.eye(3)
        xmm = mock['ra_range']
    
    # varied parameters
    pparams0 = pparams_fid
    pid, dp_fid, vlabel = get_varied_pars(vary)
    Np = len(pid)
    dp_opt = read_optimal_step(name, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    
    fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
    
    iexsort = np.argsort(fiducial.obs[0])
    raex = np.linspace(np.percentile(fiducial.obs[0], 10), np.percentile(fiducial.obs[0], 90), Nobs)
    tex = np.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)]
    fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k)
    
    nrow = 2
    ncol = np.int64((Np+1)/nrow)
    da = 4
    c = ['b', 'b', 'b', 'r', 'r', 'r']
    
    plt.close()
    fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), squeeze=False)
    
    for p in range(Np):
        plt.sca(ax[p%2][int(p/2)])
        for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]):
            pparams = [x for x in pparams0]
            pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
            stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
            
            # bspline fits to stream centerline
            iexsort = np.argsort(stream.obs[0])
            raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
            tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
            fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k)
            
            plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i])
        
        plt.xlabel('R.A. (deg)')
        plt.ylabel('Dec (deg)')
        #print(get_parlabel(p))
        plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium')
    
    plt.tight_layout()
    plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200)
# observing modes

def define_obsmodes():
    """Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes"""
    
    obsmodes = {}
    
    obsmodes['fiducial'] = {'sig_obs': np.array([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]}
    obsmodes['binospec'] = {'sig_obs': np.array([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]}
    obsmodes['hectochelle'] = {'sig_obs': np.array([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]}
    obsmodes['desi'] = {'sig_obs': np.array([0.1, 2, 10, np.nan, np.nan]), 'Ndim': [4,]}
    obsmodes['gaia'] = {'sig_obs': np.array([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]}
    obsmodes['exgal'] = {'sig_obs': np.array([0.5, np.nan, 20, np.nan, np.nan]), 'Ndim': [3,]}
    
    pickle.dump(obsmodes, open('../data/observing_modes.info','wb'))

def obsmode_name(mode):
    """Return full name of the observing mode"""
    if type(mode) is not list:
        mode = [mode]
    
    full_names = {'fiducial': 'Fiducial',
                  'binospec': 'Binospec',
                  'hectochelle': 'Hectochelle',
                  'desi': 'DESI-like',
                  'gaia': 'Gaia-like',
                  'exgal': 'Extragalactic'}
    keys = full_names.keys()
    
    names = []
    for m in mode:
        if m in keys:
            name = full_names[m]
        else:
            name = m
        names += [name]
    
    return names


# crbs using bspline

def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=np.nan, dd=0.5, Nmin=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3):
    """Calculate the Cramer-Rao bounds on the varied parameters from b-spline fits to a stream model"""
    
    mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
    if align:
        rotmatrix = mock['rotmatrix']
        xmm = np.sort(mock['xi_range'])
    else:
        rotmatrix = np.eye(3)
        xmm = np.sort(mock['ra_range'])
    
    # typical uncertainties and data availability
    obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
    if errmode not in obsmodes.keys():
        errmode = 'fiducial'
    sig_obs = obsmodes[errmode]['sig_obs']
    data_dim = obsmodes[errmode]['Ndim']
    
    # mock observations
    if np.any(~np.isfinite(ra)):
        if (np.int64((xmm[1]-xmm[0])/dd + 1) < Nmin):
            dd = (xmm[1]-xmm[0])/Nmin
        ra = np.arange(xmm[0], xmm[1]+dd, dd)
        #ra = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs)
    #else:
    Nobs = np.size(ra)
    print(name, Nobs)
    
    err = np.tile(sig_obs, Nobs).reshape(Nobs,-1)
    
    # varied parameters
    pparams0 = pparams_fid
    pid, dp_fid, vlabel = get_varied_pars(vary)
    Np = len(pid)
    dp_opt = read_optimal_step(name, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)]
    
    if scale:
        dp_unit = unity_scale(dp)
        dps = [x*y for x,y in zip(dp, dp_unit)]
    
    # calculate derivatives for all parameters
    for p in range(Np):
        for i, s in enumerate([-1, 1]):
            pparams = [x for x in pparams0]
            pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
            stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
            
            # bspline fits to stream centerline
            iexsort = np.argsort(stream.obs[0])
            raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
            tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
            
            for j in range(5):
                fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
    
    # populate matrix of derivatives and calculate CRB
    for Ndim in data_dim:
    #for Ndim in [6,]:
        Ndata = Nobs * (Ndim - 1)
        cyd = np.empty(Ndata)
        dydx = np.empty((Np, Ndata))
        dy2 = np.empty((2, Np, Ndata))
        
        for j in range(1, Ndim):
            for p in range(Np):
                dy = fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra)
                dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra)
                dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra)
                #positive = np.abs(dy)>0
                #if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dy[positive])), np.max(np.abs(dy)), np.median(np.abs(dy))))
                if scale:
                    dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dps[p].value)
                else:
                    dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dp[p].value)
                #if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dydx[p][(j-1)*Nobs:j*Nobs][positive])), np.max(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])), np.median(np.abs(dydx[p][(j-1)*Nobs:j*Nobs]))))
                #print(j, p, get_parlabel(pid[p])[0], dp[p], np.min(np.abs(dy)), np.max(np.abs(dy)), np.median(dydx[p][(j-1)*Nobs:j*Nobs]))
            
            cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2
        
        np.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt)
        
        # data component of the Fisher matrix
        cy = np.diag(cyd)
        cyi = np.diag(1. / cyd)
        caux = np.matmul(cyi, dydx.T)
        dxi = np.matmul(dydx, caux)
        
        # component based on prior knowledge of model parameters
        pxi = priors(name, vary)
        
        # full Fisher matrix
        cxi = dxi + pxi
        
        if verbose:
            cx = np.linalg.inv(cxi)
            cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
            sx = np.sqrt(np.diag(cx))
            print('CRB', sx)
            print('condition {:g}'.format(np.linalg.cond(cxi)))
            print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
            
            cx = stable_inverse(cxi)
            print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
        
        np.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi)
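# The Fisher-matrix assembly in calculate_crb, in miniature: for a model with
# derivatives M = dy/dp and diagonal data covariance sigma^2, the information
# matrix is F = M C_y^-1 M^T and the Cramer-Rao bounds are sqrt(diag(F^-1)).
# A toy straight-line example (illustrative only):
def _example_fisher():
    """Cramer-Rao bounds for a straight line y = a*x + b"""
    x = np.linspace(0, 10, 20)
    sigma = 0.5
    
    dydp = np.array([x, np.ones_like(x)])    # derivatives wrt (a, b)
    cyi = np.diag(np.ones(20)/sigma**2)
    cxi = np.matmul(dydp, np.matmul(cyi, dydp.T))
    
    crb = np.sqrt(np.diag(stable_inverse(cxi)))
    print('sigma_a, sigma_b:', crb)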
def priors(name, vary):
    """Return covariance matrix with prior knowledge about parameters"""
    
    mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
    cprog = mock['prog_prior']
    cbary = np.array([0.1*x.value for x in pparams_fid[:5]])**-2
    chalo = np.zeros(4)
    cdipole = np.zeros(3)
    cquad = np.zeros(5)
    coctu = np.zeros(7)
    
    priors = {'progenitor': cprog, 'bary': cbary, 'halo': chalo, 'dipole': cdipole, 'quad': cquad, 'octu': coctu}
    
    cprior = np.empty(0)
    for v in vary:
        cprior = np.concatenate([cprior, priors[v]])
    
    pxi = np.diag(cprior)
    
    return pxi

def scale2invert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'):
    """Rescale the derivative matrix to improve its condition number before inversion"""
    
    pid, dp_fid, vlabel = get_varied_pars(vary)
    #dp = read_optimal_step(name, vary)
    
    d = np.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
    dydx = d['dydx']
    cyd = d['cyd']
    y = d['y']
    dp = d['dp']
    
    dy = (y[1,:,:] - y[0,:,:])
    dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,np.newaxis])
    
    scaling_par = np.median(np.abs(dydx), axis=1)
    dydx = dydx / scaling_par[:,np.newaxis]
    
    dydx_ = np.reshape(dydx, (len(dp), Ndim-1, -1))
    scaling_dim = np.median(np.abs(dydx_), axis=(2,0))
    dydx_ = dydx_ / scaling_dim[np.newaxis,:,np.newaxis]
    
    cyd_ = np.reshape(cyd, (Ndim-1, -1))
    cyd_ = cyd_ / scaling_dim[:,np.newaxis]
    
    cyd = np.reshape(cyd_, (-1))
    dydx = np.reshape(dydx_, (len(dp), -1))
    
    mmin = np.min(np.abs(dy), axis=0)
    mmax = np.max(np.abs(dy), axis=0)
    mmed = np.median(np.abs(dydx), axis=1)
    dyn_range = mmax/mmin
    #print(dyn_range)
    print(np.min(dyn_range), np.max(dyn_range), np.std(dyn_range))
    
    cy = np.diag(cyd)
    cyi = np.diag(1. / cyd)
    caux = np.matmul(cyi, dydx.T)
    cxi = np.matmul(dydx, caux)
    
    print('condition {:e}'.format(np.linalg.cond(cxi)))
    
    cx = np.linalg.inv(cxi)
    cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
    print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
    
    cx = stable_inverse(cxi, maxiter=30)
    print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
def unity_scale(dp):
    """Return multiplicative factors that bring the varied parameters to a common scale"""
    
    dim_scale = 10**np.array([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4])
    dim_scale = 10**np.array([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3])
    #dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3])
    #dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3])
    dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))]
    
    return dp_unit

def test_inversion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'):
    """Compare the stability of the direct matrix inverse and the iteratively refined inverse"""
    
    pid, dp, vlabel = get_varied_pars(vary)
    
    d = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
    cxi = d['cxi']
    N = np.shape(cxi)[0]
    
    cx_ = np.linalg.inv(cxi)
    cx = stable_inverse(cxi, verbose=True, maxiter=100)
    #cx_ii = stable_inverse(cx, verbose=True, maxiter=50)
    
    print('condition {:g}'.format(np.linalg.cond(cxi)))
    print('linalg inverse', np.allclose(np.matmul(cx_,cxi), np.eye(N)))
    print('stable inverse', np.allclose(np.matmul(cx,cxi), np.eye(N)))
    #print(np.matmul(cx,cxi))
    #print('inverse inverse', np.allclose(cx_ii, cxi))

def stable_inverse(a, maxiter=20, verbose=False):
    """Invert a matrix with a bad condition number"""
    N = np.shape(a)[0]
    
    # guess
    q = np.linalg.inv(a)
    qa = np.matmul(q,a)
    
    # iterate
    for i in range(maxiter):
        if verbose: print(i, np.sqrt(np.sum((qa - np.eye(N))**2)), np.allclose(qa, np.eye(N)))
        if np.allclose(qa, np.eye(N)):
            return q
        
        qai = np.linalg.inv(qa)
        q = np.matmul(qai,q)
        qa = np.matmul(q,a)
    
    return q
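# Demonstration of stable_inverse on an ill-conditioned matrix (a sketch,
# assuming la is scipy.linalg as used elsewhere in this module): the
# iteratively refined inverse should satisfy q a = I more closely than the
# direct inverse alone.
def _example_stable_inverse():
    """Compare direct and iteratively refined inverses of a Hilbert matrix"""
    a = la.hilbert(12)
    q0 = np.linalg.inv(a)
    q = stable_inverse(a, maxiter=30)
    
    print('condition {:g}'.format(np.linalg.cond(a)))
    print('direct  |qa - I| =', np.sqrt(np.sum((np.matmul(q0, a) - np.eye(12))**2)))
    print('refined |qa - I| =', np.sqrt(np.sum((np.matmul(q, a) - np.eye(12))**2)))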
def crb_triangle(name='gd1', vary=['progenitor', 'bary', 'halo'], Ndim=6, align=True, plot='all', fast=False, errmode='fiducial'):
    """Show CRB correlations for a single data dimensionality in a triangle plot"""
    
    pid, dp, vlabel = get_varied_pars(vary)
    plabels, units = get_parlabel(pid)
    params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
    
    if align:
        alabel = '_align'
    else:
        alabel = ''
    
    fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
    cxi = fm['cxi']
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    #print(cx[0][0])
    
    if plot=='halo':
        cx = cx[:4, :4]
        params = params[:4]
    elif plot=='bary':
        cx = cx[4:9, 4:9]
        params = params[4:9]
    elif plot=='progenitor':
        cx = cx[9:, 9:]
        params = params[9:]
    
    Nvar = len(params)
    
    plt.close()
    dax = 2
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
    
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar):
            plt.sca(ax[j-1][i])
            cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
            
            w, v = np.linalg.eig(cx_2d)
            if np.all(np.isreal(v)):
                theta = np.degrees(np.arccos(v[0][0]))
                width = np.sqrt(w[0])*2
                height = np.sqrt(w[1])*2
                
                e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2)
                plt.gca().add_patch(e)
            plt.gca().autoscale_view()
            
            #plt.xlim(-ylim[i],ylim[i])
            #plt.ylim(-ylim[j], ylim[j])
            
            if j==Nvar-1:
                plt.xlabel(params[i])
            
            if i==0:
                plt.ylabel(params[j])
    
    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')
    
    plt.tight_layout()
    plt.savefig('../plots/crb_triangle_{:s}_{:s}_{:s}_{:d}_{:s}.pdf'.format(alabel, name, vlabel, Ndim, plot))

def crb_triangle_alldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='all', fast=False, scale=False, errmode='fiducial'):
    """Show correlations in CRB between a chosen set of parameters in a triangle plot"""
    
    pid, dp_fid, vlabel = get_varied_pars(vary)
    dp_opt = read_optimal_step(name, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    plabels, units = get_parlabel(pid)
    punits = [' ({})'.format(x) if len(x) else '' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
    
    if plot=='halo':
        i0 = 11
        i1 = 15
    elif plot=='bary':
        i0 = 6
        i1 = 11
    elif plot=='progenitor':
        i0 = 0
        i1 = 6
    elif plot=='dipole':
        i0 = 15
        i1 = len(params)
    else:
        i0 = 0
        i1 = len(params)
    
    Nvar = i1 - i0
    params = params[i0:i1]
    if scale:
        dp_unit = unity_scale(dp)
        #print(dp_unit)
        dp_unit = dp_unit[i0:i1]
        pid = pid[i0:i1]
    
    label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
    
    plt.close()
    dax = 2
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
    
    for l, Ndim in enumerate([3, 4, 6]):
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        #cxi = np.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npy'.format(errmode, Ndim, name, align, vlabel))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cx = cx[i0:i1,i0:i1]
        
        for i in range(0,Nvar-1):
            for j in range(i+1,Nvar):
                plt.sca(ax[j-1][i])
                if scale:
                    cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
                else:
                    cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
                
                w, v = np.linalg.eig(cx_2d)
                if np.all(np.isreal(v)):
                    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                    width = np.sqrt(w[0])*2
                    height = np.sqrt(w[1])*2
                    
                    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
                    plt.gca().add_patch(e)
                
                if l==1:
                    plt.gca().autoscale_view()
                
                if j==Nvar-1:
                    plt.xlabel(params[i])
                
                if i==0:
                    plt.ylabel(params[j])
    
    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')
    
    plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
    plt.legend(loc=2, bbox_to_anchor=(1,1))
    
    plt.tight_layout()
    plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot))

def compare_optimal_steps():
    """Print the optimal step sizes stored for each stream"""
    
    vary = ['progenitor', 'bary', 'halo', 'dipole', 'quad']
    vary = ['progenitor', 'bary', 'halo']
    
    for name in ['gd1', 'tri']:
        print(name)
        print(read_optimal_step(name, vary))

def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True):
    """Run the full CRB pipeline for a stream: great circle, step sizes, derivatives and bounds"""
    
    if first:
        store_progparams(name)
        wrap_angles(name, save=True)
        progenitor_prior(name)
        
        find_greatcircle(name=name)
        endpoints(name)
        
        for v in vary:
            step_convergence(name=name, Nstep=Nstep, vary=v)
            choose_step(name=name, Nstep=Nstep, vary=v)
    
    calculate_crb(name=name, vary=vary, verbose=True)
    crb_triangle_alldim(name=name, vary=vary)


########################
# cartesian coordinates

# accelerations

def acc_kepler(x, p=1*u.Msun):
    """Keplerian acceleration"""
    r = np.linalg.norm(x)*u.kpc
    a = -G * p * 1e11 * r**-3 * x
    
    return a.to(u.pc*u.Myr**-2)

def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]):
    """Acceleration due to a Hernquist bulge"""
    r = np.linalg.norm(x)*u.kpc
    a = -(G*p[0]*x/(r * (r + p[1])**2)).to(u.pc*u.Myr**-2)
    
    return a
def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]):
    """Acceleration due to a Miyamoto-Nagai disk"""
    R = np.linalg.norm(x[:2])*u.kpc
    z = x[2]
    a = -(G*p[0]*x * (R**2 + (p[1] + np.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2)
    a[2] *= (1 + p[2]/np.sqrt(z**2 + p[2]**2))
    
    return a

def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]):
    """Acceleration due to a flattened NFW halo"""
    r = np.linalg.norm(x)*u.kpc
    q = np.array([1*u.Unit(1), p[2], p[3]])
    a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - np.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2)
    
    return a

def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]):
    """Acceleration due to outside dipole perturbation"""
    
    pv = [x.value for x in p]
    a = np.sqrt(3/(4*np.pi)) * np.array([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2
    
    return a

def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
    """Acceleration due to outside quadrupole perturbation"""
    
    a = np.zeros(3)*u.pc*u.Myr**-2
    f = 0.5*np.sqrt(15/np.pi)
    
    a[0] = x[0]*(f*p[4] - f/np.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3]
    a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/np.sqrt(3)*p[2]) + x[2]*f*p[1]
    a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/np.sqrt(3)*p[2]
    
    return a.to(u.pc*u.Myr**-2)

def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
    """Acceleration due to outside octupole perturbation"""
    
    a = np.zeros(3)*u.pc*u.Myr**-2
    f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
    
    xu = x.unit
    pu = p[0].unit
    pvec = np.array([i.value for i in p]) * pu
    
    dmat = np.ones((3,7)) * f * pvec * xu**2
    x = np.array([i.value for i in x])
    
    dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
    dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
    dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
    
    a = np.einsum('ij->i', dmat) * dmat.unit
    
    return a.to(u.pc*u.Myr**-2)


# derivatives

def der_kepler(x, p=1*u.Msun):
    """Derivative of Kepler potential parameters wrt cartesian components of the acceleration"""
    
    r = np.linalg.norm(x)*u.kpc
    
    dmat = np.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun
    dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11
    
    return dmat.value

def pder_kepler(x, p=1*u.Msun):
    """Derivative of cartesian components of the acceleration wrt to Kepler potential parameter"""
    r = np.linalg.norm(x)*u.kpc
    dmat = np.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1
    dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11
    
    return dmat.value

def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]):
    """Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters"""
    
    p = pu
    q = np.array([1, p[2], p[3]])
    
    # physical quantities
    r = np.linalg.norm(x)*u.kpc
    a = acc_nfw(x, p=pu)
    
    # derivatives
    dmat = np.zeros((3, 4))
    
    # Vh
    dmat[:,0] = 2*a/p[0]
    
    # Rh
    dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2
    
    # qy, qz
    for i in [1,2]:
        dmat[i,i+1] = (-2*a[i]/q[i]).value
    
    return dmat

def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]):
    """Calculate derivatives of cartesian components of the acceleration wrt Hernquist bulge potential parameters"""
    
    # coordinates
    r = np.linalg.norm(x)*u.kpc
    
    # accelerations
    ab = acc_bulge(x, p=pu[:2])
    
    # derivatives
    dmat = np.zeros((3, 2))
    
    # Mb
    dmat[:,0] = ab/pu[0]
    
    # ab
    dmat[:,1] = 2 * ab / (r + pu[1])
    
    return dmat
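# Consistency sketch: the Kepler acceleration is linear in the point mass, so
# the analytic derivative returned by pder_kepler should equal the
# acceleration evaluated at p = 1 Msun (i.e. 1e11 Msun in the normalization
# used above).
def _example_pder_kepler():
    """Check pder_kepler against acc_kepler via linearity"""
    x = np.array([4., 4., 0.])*u.kpc
    da = pder_kepler(x)[:,0]
    a = acc_kepler(x, p=1*u.Msun).value
    print(np.allclose(da, a))    # expect True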
def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]):
    """Calculate derivatives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters"""
    
    # coordinates
    R = np.linalg.norm(x[:2])*u.kpc
    z = x[2]
    aux = np.sqrt(z**2 + pu[2]**2)
    
    # accelerations
    ad = acc_disk(x, p=pu)
    
    # derivatives
    dmat = np.zeros((3, 3))
    
    # Md
    dmat[:,0] = ad / pu[0]
    
    # ad
    dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2)
    
    # bd
    dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux
    dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value
    
    return dmat

def der_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
    """Calculate derivatives of dipole potential parameters wrt (Cartesian) components of the acceleration vector a"""
    
    # shape: 3, Npar
    dmat = np.zeros((3,3))
    
    f = np.sqrt((4*np.pi)/3)
    
    dmat[0,2] = f
    dmat[1,0] = f
    dmat[2,1] = f
    
    return dmat

def pder_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
    """Calculate derivatives of (Cartesian) components of the acceleration vector a wrt dipole potential parameters"""
    
    # shape: 3, Npar
    dmat = np.zeros((3,3))
    
    f = np.sqrt(3/(4*np.pi))
    
    dmat[0,2] = f
    dmat[1,0] = f
    dmat[2,1] = f
    
    return dmat

def der_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
    """Calculate derivatives of quadrupole potential parameters wrt (Cartesian) components of the acceleration vector a"""
    
    f = 2/np.sqrt(15/np.pi)
    s = np.sqrt(3)
    x = [1e-3/i.value for i in x]
    
    dmat = np.ones((3,5)) * f
    
    dmat[0] = np.array([x[1], 0, -s*x[0], x[2], x[0]])
    dmat[1] = np.array([x[0], x[2], -s*x[1], 0, -x[1]])
    dmat[2] = np.array([0, x[1], 0.5*s*x[2], x[0], 0])
    
    return dmat

def pder_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
    """Calculate derivatives of (Cartesian) components of the acceleration vector a wrt quadrupole potential parameters"""
    
    f = 0.5*np.sqrt(15/np.pi)
    s = 1/np.sqrt(3)
    x = [1e-3*i.value for i in x]
    
    dmat = np.ones((3,5)) * f
    
    dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]])
    dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]])
    dmat[2] *= np.array([0, x[1], 2*s*x[2], x[0], 0])
    
    return dmat

def pder_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
    """Calculate derivatives of (Cartesian) components of the acceleration vector a wrt octupole potential parameters"""
    
    f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
    
    x = [1e-3*i.value for i in x]
    
    dmat = np.ones((3,7)) * f
    
    dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
    dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
    dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
    
    return dmat
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
    """Calculate CRB inverse matrix for 3D acceleration at position x in a halo potential"""
    
    pid, dp, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    
    # subset halo parameters
    Nhalo = 4
    cq = cx[:Nhalo,:Nhalo]
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    
    xi = np.array([-8.3, 0.1, 0.1])*u.kpc
    x0, v0 = gd1_coordinates()
    #xi = np.array(x0)*u.kpc
    d = 50
    Nb = 20
    x = np.linspace(x0[0]-d, x0[0]+d, Nb)
    y = np.linspace(x0[1]-d, x0[1]+d, Nb)
    x = np.linspace(-d, d, Nb)
    y = np.linspace(-d, d, Nb)
    xv, yv = np.meshgrid(x, y)
    
    xf = np.ravel(xv)
    yf = np.ravel(yv)
    af = np.empty((Nb**2, 3))
    
    plt.close()
    fig, ax = plt.subplots(3,3,figsize=(11,10))
    
    dimension = ['x', 'y', 'z']
    xlabel = ['y', 'x', 'x']
    ylabel = ['z', 'z', 'y']
    
    for j in range(3):
        if j==0:
            xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T
        elif j==1:
            xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T
        elif j==2:
            xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T
        for i in range(Nb**2):
            #xi = np.array([xf[i], yf[i], x0[2]])*u.kpc
            xi = xin[i]*u.kpc
            a = acc_nfw(xi)
            
            dqda = halo_accelerations(xi)
            
            cai = np.matmul(dqda, np.matmul(cqi, dqda.T))
            if fast:
                ca = np.linalg.inv(cai)
            else:
                ca = stable_inverse(cai)
            a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
            af[i] = np.abs(a_crb/a)
            af[i] = a_crb
        
        for i in range(3):
            plt.sca(ax[j][i])
            im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1)
            
            plt.xlabel(xlabel[j]+' (kpc)')
            plt.ylabel(ylabel[j]+' (kpc)')
            
            divider = make_axes_locatable(plt.gca())
            cax = divider.append_axes("top", size="4%", pad=0.05)
            plt.colorbar(im, cax=cax, orientation='horizontal')
            
            plt.gca().xaxis.set_ticks_position('top')
            cax.tick_params(axis='x', labelsize='xx-small')
            
            if j==0:
                plt.title('a$_{}$'.format(dimension[i]), y=4)
    
    plt.tight_layout(rect=[0,0,1,0.95])
    plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))

def acc_cart(x, components=['bary', 'halo', 'dipole']):
    """Return the total cartesian acceleration from the chosen potential components"""
    
    acart = np.zeros(3) * u.pc*u.Myr**-2
    dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]}
    accelerations = []
    for c in components:
        accelerations += dict_acc[c]
    
    for acc in accelerations:
        a_ = acc(x)
        acart += a_
    
    return acart

def acc_rad(x, components=['bary', 'halo', 'dipole']):
    """Return radial acceleration"""
    
    r = np.linalg.norm(x) * x.unit
    theta = np.arccos(x[2].value/r.value)
    phi = np.arctan2(x[1].value, x[0].value)
    trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
    a_cart = acc_cart(x, components=components)
    a_rad = np.dot(a_cart, trans)
    
    return a_rad

def ader_cart(x, components=['bary', 'halo', 'dipole']):
    """Return derivatives of the potential parameters wrt the cartesian acceleration components"""
    
    dacart = np.empty((3,0))
    dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]}
    derivatives = []
    for c in components:
        derivatives += dict_der[c]
    
    for ader in derivatives:
        da_ = ader(x)
        dacart = np.hstack((dacart, da_))
    
    return dacart

def apder_cart(x, components=['bary', 'halo', 'dipole']):
    """Return derivatives of the cartesian acceleration components wrt the potential parameters"""
    
    dacart = np.empty((3,0))
    dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]}
    derivatives = []
    for c in components:
        derivatives += dict_der[c]
    
    for ader in derivatives:
        da_ = ader(x)
        dacart = np.hstack((dacart, da_))
    
    return dacart

def apder_rad(x, components=['bary', 'halo', 'dipole']):
    """Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x"""
    
    r = np.linalg.norm(x) * x.unit
    theta = np.arccos(x[2].value/r.value)
    phi = np.arctan2(x[1].value, x[0].value)
    trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
    
    dadq_cart = apder_cart(x, components=components)
    dadq_rad = np.einsum('ij,i->j', dadq_cart, trans)
    
    return dadq_rad
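# The error-propagation step at the heart of crb_acart below, in isolation:
# for a parameter covariance cq and the Jacobian dadq = da/dq at a point, the
# acceleration covariance is ca = dadq cq dadq^T (cq is a placeholder
# argument here; in the pipeline it is a block of the inverted Fisher matrix).
def _example_acc_covariance(cq):
    """Propagate a potential-parameter covariance to the acceleration at one point.
    cq: square covariance matrix matching the 'halo' parameter block (4x4)."""
    xi = np.array([4., 4., 0.])*u.kpc
    dadq = apder_cart(xi, components=['halo'])
    ca = np.matmul(dadq, np.matmul(cq, dadq.T))
    
    return np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2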
def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'):
    """Map the CRB on the acceleration components in the R-z plane"""
    
    pid, dp_fid, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    if relative:
        vmin = 1e-2
        vmax = 1
        rlabel = ' / a'
    else:
        vmin = 3e-1
        vmax = 1e1
        rlabel = ' (pc Myr$^{-2}$)'
    
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    
    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'all': np.shape(cx)[0], 'point': 1}
    
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole
    
    if component in ['bary', 'halo', 'dipole', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
    Npot = np.shape(cq)[0]
    
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    
    if scale:
        dp_opt = read_optimal_step(n, vary)
        dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
        scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
        scale_mat = np.outer(scale_vec, scale_vec)
        cqi *= scale_mat
    
    if progenitor:
        x0, v0 = gd1_coordinates()
    else:
        x0 = np.array([4, 4, 0])
    Rp = np.linalg.norm(x0[:2])
    zp = x0[2]
    
    R = np.linspace(-d, d, Nb)
    k = x0[1]/x0[0]
    x = R/np.sqrt(1+k**2)
    y = k * x
    
    z = np.linspace(-d, d, Nb)
    
    xv, zv = np.meshgrid(x, z)
    yv, zv = np.meshgrid(y, z)
    xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
    
    Npix = np.size(xv)
    af = np.empty((Npix, 3))
    derf = np.empty((Npix, 3, Npot))
    
    for i in range(Npix):
        xi = xin[i]*u.kpc
        a = acc_cart(xi, components=components)
        
        dadq = apder_cart(xi, components=components)
        derf[i] = dadq
        
        ca = np.matmul(dadq, np.matmul(cq, dadq.T))
        a_crb = np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2
        if relative:
            af[i] = np.abs(a_crb/a)
        else:
            af[i] = a_crb
        #print(xi, a_crb)
    
    # save
    np.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf)
    
    plt.close()
    fig, ax = plt.subplots(1, 3, figsize=(15, 5))
    
    label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$']
    
    for i in range(3):
        plt.sca(ax[i])
        im = plt.imshow(af[:,i].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=mpl.colors.LogNorm())
        if progenitor:
            plt.plot(Rp, zp, 'r*', ms=10)
        
        plt.xlabel('R (kpc)')
        plt.ylabel('Z (kpc)')
        
        divider = make_axes_locatable(plt.gca())
        cax = divider.append_axes("right", size="3%", pad=0.1)
        plt.colorbar(im, cax=cax)
        
        plt.ylabel(label[i] + rlabel)
    
    plt.tight_layout()
    plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative))
def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', j=0, align=True, d=20, Nb=30, fast=False, scale=False, relative=True, progenitor=False, batch=False, errmode='fiducial'):
    """Map the dominant eigenmodes of the acceleration covariance in the R-z plane"""
    
    pid, dp_fid, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    if relative:
        vmin = 1e-2
        vmax = 1
        rlabel = ' / a'
    else:
        vmin = -0.005
        vmax = 0.005
        #vmin = 1e-2
        #vmax = 1e0
        rlabel = ' (pc Myr$^{-2}$)'
    
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    
    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
    
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole
    
    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
    Npot = np.shape(cq)[0]
    
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    
    if scale:
        dp_opt = read_optimal_step(n, vary)
        dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
        scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
        scale_mat = np.outer(scale_vec, scale_vec)
        cqi *= scale_mat
    
    if progenitor:
        prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()}
        x0, v0 = prog_coords[n]
        print(x0)
    else:
        x0 = np.array([4, 4, 0])
    Rp = np.linalg.norm(x0[:2])
    zp = x0[2]
    
    R = np.linspace(-d, d, Nb)
    k = x0[1]/x0[0]
    x = R/np.sqrt(1+k**2)
    y = k * x
    
    z = np.linspace(-d, d, Nb)
    
    xv, zv = np.meshgrid(x, z)
    yv, zv = np.meshgrid(y, z)
    xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
    
    Npix = np.size(xv)
    af = np.empty((Npix, 3))
    derf = np.empty((Npix*3, Npot))
    
    for i in range(Npix):
        xi = xin[i]*u.kpc
        a = acc_cart(xi, components=components)
        
        dadq = apder_cart(xi, components=components)
        derf[i*3:(i+1)*3] = dadq
    
    ca = np.matmul(derf, np.matmul(cq, derf.T))
    
    Nx = Npot
    Nw = Npix*3
    vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
    
    ## check orthogonality:
    #for i in range(Npot-1):
        #for k in range(i+1, Npot):
            #print(i, k)
            #print(np.dot(vecs[:,i], vecs[:,k]))
            #print(np.dot(vecs[::3,i], vecs[::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]))
    
    # save
    np.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca)
    
    plt.close()
    fig, ax = plt.subplots(1, 3, figsize=(15, 5))
    
    if j==0:
        vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
        label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
        vmin = 1e-2
        vmax = 5e0
        norm = mpl.colors.LogNorm()
    else:
        vcomb = vecs[:,j]
        label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
        vmin = -0.025
        vmax = 0.025
        norm = None
    
    for i in range(3):
        plt.sca(ax[i])
        #im = plt.imshow(vecs[i::3,j].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax)
        im = plt.imshow(vcomb[i::3].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=norm)
        if progenitor:
            plt.plot(Rp, zp, 'r*', ms=10)
        
        plt.xlabel('R (kpc)')
        plt.ylabel('Z (kpc)')
        
        divider = make_axes_locatable(plt.gca())
        cax = divider.append_axes("right", size="3%", pad=0.1)
        plt.colorbar(im, cax=cax)
        
        plt.ylabel(label[i])
    
    plt.tight_layout()
    if batch:
        return fig
    else:
        plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor))
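# Note on the eigendecomposition above: la.eigh(..., eigvals=(lo, hi)) returns
# only the eigenpairs with indices lo..hi (ascending), i.e. the few largest
# modes of the Npix*3 x Npix*3 covariance. Newer SciPy versions replace the
# eigvals keyword with subset_by_index; a version-agnostic alternative:
def _top_eigenpairs(ca, nmodes):
    """Return the nmodes largest eigenvalues and eigenvectors of a symmetric matrix"""
    vals, vecs = np.linalg.eigh(ca)    # eigenvalues in ascending order
    return vals[-nmodes:], vecs[:, -nmodes:]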
if progenitor: plt.plot(Rp, zp, 'r*', ms=10) plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') divider = make_axes_locatable(plt.gca()) cax = divider.append_axes("right", size="3%", pad=0.1) plt.colorbar(im, cax=cax) plt.ylabel(label[i]) plt.tight_layout() if batch: return fig else: plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor)) def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', d=20, Nb=10): """Plot acceleration field in R,z plane""" if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] x0 = np.array([4, 4, 0]) R = np.linspace(-d, d, Nb) k = x0[1]/x0[0] x = R/np.sqrt(1+k**2) y = k * x z = np.linspace(-d, d, Nb) xv, zv = np.meshgrid(x, z) yv, zv = np.meshgrid(y, z) xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0]) zin = xin[:,2] Npix = np.size(xv) acart_pix = np.empty((Npix, 3)) acyl_pix = np.empty((Npix, 2)) for i in range(Npix): xi = xin[i]*u.kpc acart = acc_cart(xi, components=components) acart_pix[i] = acart acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0]) acyl_pix[:,1] = acart_pix[:,2] plt.close() plt.figure() plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1]) plt.tight_layout() def a_crbcov_vecfield(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all', j=0, align=True, d=20, Nb=10, fast=False, scale=True, relative=False, progenitor=False, batch=False): """""" pid, dp_fid, vlabel = get_varied_pars(vary) if align: alabel = '_align' else: alabel = '' if relative: vmin = 1e-2 vmax = 1 rlabel = ' / a' else: vmin = -0.005 vmax = 0.005 #vmin = 1e-2 #vmax = 1e0 rlabel = ' (pc Myr$^{-2}$)' # read in full inverse CRB for stream modeling cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim)) if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) # choose the appropriate components: Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1] if 'progenitor' not in vary: Nprog = 0 nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0} nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1} if 'progenitor' not in vary: nstart['dipole'] = Npoint nend['dipole'] = Npoint + Ndipole if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]] Npot = np.shape(cq)[0] if fast: cqi = np.linalg.inv(cq) else: cqi = stable_inverse(cq) if scale: dp_opt = read_optimal_step(n, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]]) scale_mat = np.outer(scale_vec, scale_vec) cqi *= scale_mat if progenitor: x0, v0 = gd1_coordinates() else: x0 = np.array([4, 4, 0]) Rp = np.linalg.norm(x0[:2]) zp = x0[2] R = np.linspace(-d, d, Nb) k = x0[1]/x0[0] x = R/np.sqrt(1+k**2) y = k * x z = np.linspace(-d, d, Nb) xv, zv = np.meshgrid(x, z) yv, zv = np.meshgrid(y, z) xin = np.array([np.ravel(xv), 
np.ravel(yv), np.ravel(zv)]).T Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0]) zin = xin[:,2] Npix = np.size(xv) acart_pix = np.empty((Npix, 3)) acyl_pix = np.empty((Npix, 2)) vcomb_pix = np.empty((Npix, 2)) af = np.empty((Npix, 3)) derf = np.empty((Npix*3, Npot)) for i in range(Npix): xi = xin[i]*u.kpc a = acc_cart(xi, components=components) acart_pix[i] = a dadq = apder_cart(xi, components=components) derf[i*3:(i+1)*3] = dadq acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0]) acyl_pix[:,1] = acart_pix[:,2] ca = np.matmul(derf, np.matmul(cq, derf.T)) Nx = Npot Nw = Npix*3 vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1)) if j==0: vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1)) label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']] vmin = 1e-3 vmax = 1e-1 norm = mpl.colors.LogNorm() else: vcomb = vecs[:,j]*np.sqrt(vals[j]) label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']] vmin = -0.025 vmax = 0.025 norm = None vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(xin[:,0]) #vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(vcomb[0::3]) vcomb_pix[:,1] = vcomb[2::3] plt.close() fig, ax = plt.subplots(1,2,figsize=(10,5)) plt.sca(ax[0]) plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1], pivot='middle') plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') plt.title('Acceleration {}'.format(component), fontsize='medium') plt.sca(ax[1]) plt.quiver(Rin, zin, vcomb_pix[:,0], vcomb_pix[:,1], pivot='middle', headwidth=0, headlength=0, headaxislength=0, scale=0.02, scale_units='xy') plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') plt.title('Eigenvector {}'.format(np.abs(j)), fontsize='medium') plt.tight_layout() if batch: return fig else: plt.savefig('../plots/afield_crbcov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative)) def summary(n, mode='scalar', vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all'): """""" pid, dp_fid, vlabel = get_varied_pars(vary) fn = {'scalar': crb_acart_cov, 'vector': a_crbcov_vecfield} bins = {'scalar': 30, 'vector': 10} Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1] Npars = {'bary': Nbary, 'halo': Nhalo, 'dipole': Ndipole, 'quad': Nquad, 'point': Npoint} if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] Niter = [Npars[x] for x in components] Niter = sum(Niter) + 1 pp = PdfPages('../plots/acceleration_{}_{}_{}_{}_{}.pdf'.format(n, errmode, vlabel, component, mode)) for i in range(Niter): print(i, Niter) fig = fn[mode](-1, progenitor=True, batch=True, errmode=errmode, vary=vary, component=component, j=-i, d=20, Nb=bins[mode]) pp.savefig(fig) pp.close() ######### # Summary def full_names(): """""" full = {'gd1': 'GD-1', 'atlas': 'ATLAS', 'tri': 'Triangulum', 'ps1a': 'PS1A', 'ps1b': 'PS1B', 'ps1c': 'PS1C', 'ps1d': 'PS1D', 'ps1e': 'PS1E', 'ophiuchus': 'Ophiuchus', 'hermus': 'Hermus', 'kwando': 'Kwando', 'orinoco': 'Orinoco', 'sangarius': 'Sangarius', 'scamander': 'Scamander'} return full def full_name(name): """""" full = full_names() return full[name] def get_done(sort_length=False): """""" done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'ophiuchus', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d'] done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d'] # 
length if sort_length: tosort = [] for name in done: mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) tosort += [np.max(mock['xi_range']) - np.min(mock['xi_range'])] done = [x for _,x in sorted(zip(tosort,done))] else: tosort = [] vary = ['progenitor', 'bary', 'halo'] Ndim = 6 errmode = 'fiducial' align = True pid, dp_fid, vlabel = get_varied_pars(vary) pid_vh = myutils.wherein(np.array(pid), np.array([5])) for name in done: fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] cx = stable_inverse(cxi) crb = np.sqrt(np.diag(cx)) tosort += [crb[pid_vh]] done = [x for _,x in sorted(zip(tosort,done))][::-1] return done def store_mocks(): """""" done = get_done() for name in done: stream = stream_model(name) np.save('../data/streams/mock_observed_{}'.format(name), stream.obs) def period(name): """Return orbital period in units of stepsize and number of complete periods""" orbit = stream_orbit(name=name) r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0) a = np.abs(np.fft.rfft(r)) f = np.argmax(a[1:]) + 1 p = np.size(a)/f return (p, f) def extract_crbs(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', j=0, align=True, fast=False, scale=False): """""" pid, dp_fid, vlabel = get_varied_pars(vary) names = get_done() tout = Table(names=('name', 'crb')) pparams0 = pparams_fid pid_comp, dp_fid2, vlabel2 = get_varied_pars(component) Np = len(pid_comp) pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp)) plt.close() fig, ax = plt.subplots(Np,1,figsize=(10,15), subplot_kw=dict(projection='mollweide')) for name in names[:]: fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) crb = np.sqrt(np.diag(cx)) #print([pparams0[pid_comp[i]] for i in range(Np)]) crb_frac = [crb[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)] print(name, crb_frac) stream = stream_model(name=name) for i in range(Np): plt.sca(ax[i]) color_index = np.array(crb_frac[:]) color_index[color_index>0.2] = 0.2 color_index /= 0.2 color = mpl.cm.viridis(color_index[i]) plt.plot(np.radians(stream.obs[0]), np.radians(stream.obs[1]), 'o', color=color, ms=4) for i in range(Np): plt.sca(ax[i]) #plt.xlabel('RA') plt.ylabel('Dec') plt.text(0.9, 0.9, '$\Delta$ {}'.format(get_parlabel(pid_comp[i])[0]), fontsize='medium', transform=plt.gca().transAxes, va='bottom', ha='left') plt.grid() plt.xlabel('RA') # add custom colorbar sm = plt.cm.ScalarMappable(cmap=mpl.cm.viridis, norm=plt.Normalize(vmin=0, vmax=20)) # fake up the array of the scalar mappable. Urgh... 
sm._A = [] if component=='bary': cb_pad = 0.1 else: cb_pad = 0.06 cb = fig.colorbar(sm, ax=ax.ravel().tolist(), pad=cb_pad, aspect=40, ticks=np.arange(0,21,5)) cb.set_label('Cramer $-$ Rao bounds (%)') #plt.tight_layout() plt.savefig('../plots/crb_onsky_{}.png'.format(component)) def vhrh_correlation(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', align=True): """""" names = get_done() t = Table.read('../data/crb/ar_orbital_summary.fits') N = len(names) p = np.empty(N) pid, dp_fid, vlabel = get_varied_pars(vary) pid_comp, dp_fid2, vlabel2 = get_varied_pars(component) i = pid_comp[0] j = pid_comp[1] for e, name in enumerate(names): fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] cx = stable_inverse(cxi) p[e] = cx[i][j]/np.sqrt(cx[i][i]*cx[j][j]) plt.close() plt.figure() plt.plot(t['rapo'], p, 'ko') def allstream_2d(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, relative=False): """Compare 2D constraints between all streams""" pid, dp_fid, vlabel = get_varied_pars(vary) names = get_done() N = len(names) # plot setup ncol = np.int64(np.ceil(np.sqrt(N))) nrow = np.int64(np.ceil(N/ncol)) w_ = 8 h_ = 1.1 * w_*nrow/ncol alpha = 1 lw = 2 frac = [0.8, 0.5, 0.2] # parameter pairs paramids = [8, 11, 12, 13, 14] all_comb = list(itertools.combinations(paramids, 2)) comb = sorted(list(set(all_comb))) Ncomb = len(comb) #print(comb) pp = PdfPages('../plots/allstreams_2d_{}_a{:1d}_{}_r{:1d}.pdf'.format(errmode, align, vlabel, relative)) for c in range(Ncomb): l, k = comb[c] plt.close() fig, ax = plt.subplots(nrow, ncol, figsize=(w_, h_), sharex=True, sharey=True) for i in range(N): plt.sca(ax[np.int64(i/ncol)][i%ncol]) for e, Ndim in enumerate([3,4,6]): color = mpl.cm.bone(frac[e]) fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, names[i], align, vlabel)) cxi = fm['cxi'] cx = stable_inverse(cxi) cx_2d = np.array([[cx[k][k], cx[k][l]], [cx[l][k], cx[l][l]]]) if relative: pk = pparams_fid[pid[k]].value pl = pparams_fid[pid[l]].value fid_2d = np.array([[pk**2, pk*pl], [pk*pl, pl**2]]) cx_2d = cx_2d / fid_2d * 100**2 w, v = np.linalg.eig(cx_2d) if np.all(np.isreal(v)): theta = np.degrees(np.arctan2(v[1][0], v[0][0])) width = np.sqrt(w[0])*2 height = np.sqrt(w[1])*2 e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=color, alpha=alpha, lw=lw) plt.gca().add_patch(e) txt = plt.text(0.9, 0.9, full_name(names[i]), fontsize='small', transform=plt.gca().transAxes, ha='right', va='top') txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none')) if relative: plt.xlim(-20, 20) plt.ylim(-20,20) else: plt.gca().autoscale_view() plabels, units = get_parlabel([pid[k],pid[l]]) if relative: punits = [' (%)' for x in units] else: punits = [' ({})'.format(x) if len(x) else '' for x in units] params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)] for i in range(ncol): plt.sca(ax[nrow-1][i]) plt.xlabel(params[0]) for i in range(nrow): plt.sca(ax[i][0]) plt.ylabel(params[1]) for i in range(N, ncol*nrow): plt.sca(ax[np.int64(i/ncol)][i%ncol]) plt.axis('off') plt.tight_layout(h_pad=0, w_pad=0) pp.savefig(fig) pp.close() # circular velocity def pder_vc(x, p=[pparams_fid[j] for j in [0,1,2,3,4,5,6,8,10]], components=['bary', 'halo']): """""" N = np.size(x) # components if 'bary' in components: bulge = np.array([G*x*(x+p[1])**-2, -2*G*p[0]*x*(x+p[1])**-3]) aux = p[3] + p[4] disk = np.array([G*x**2*(x**2 + 
aux**2)**-1.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5]) nfw = np.array([2*p[5]*(p[6]/x*np.log(1+x.value/p[6].value) - (1+x.value/p[6].value)**-1), p[5]**2*(np.log(1+x.value/p[6].value)/x - (x+p[6])**-1 - x*(x+p[6])**-2), np.zeros(N), np.zeros(N)]) pder = np.vstack([bulge, disk, nfw]) else: pder = np.array([2*p[0]*(p[1]/x*np.log(1+x.value/p[1].value) - (1+x.value/p[1].value)**-1), p[0]**2*(np.log(1+x.value/p[1].value)/x - (x+p[1])**-1 - x*(x+p[1])**-2), np.zeros(N), np.zeros(N)]) return pder def delta_vc_vec(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, fast=False, scale=False, ascale=False): """""" pid, dp_fid, vlabel = get_varied_pars(vary) names = get_done() labels = full_names() colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)} #colors = {'gd1': mpl.cm.bone(0), 'atlas': mpl.cm.bone(0.5), 'tri': mpl.cm.bone(0.8)} plt.close() fig, ax = plt.subplots(1,2,figsize=(10,5)) for name in names: # read in full inverse CRB for stream modeling fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) # choose the appropriate components: Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1] if 'progenitor' not in vary: Nprog = 0 nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0} nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1} if 'progenitor' not in vary: nstart['dipole'] = Npoint nend['dipole'] = Npoint + Ndipole if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]] Npot = np.shape(cq)[0] if fast: cqi = np.linalg.inv(cq) else: cqi = stable_inverse(cq) if scale: dp_opt = read_optimal_step(name, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]]) scale_mat = np.outer(scale_vec, scale_vec) cqi *= scale_mat x = np.linspace(0.01, d, Nb)*u.kpc Npix = np.size(x) derf = np.transpose(pder_vc(x, components=components)) ca = np.matmul(derf, np.matmul(cq, derf.T)) Nx = Npot Nw = Nb vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1)) if j==0: vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1)) #label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']] else: vcomb = vecs[:,j]*np.sqrt(vals[j]) #label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']] mcomb = (vcomb*u.km**2*u.s**-2 * x / G).to(u.Msun) vc_true = vcirc_potential(x, pparams=pparams_fid) # relate to orbit orbit = stream_orbit(name=name) r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0) rmin = np.min(r) rmax = np.max(r) rcur = r[0] r0 = r[-1] print(name, rcur, r0) e = (rmax - rmin)/(rmax + rmin) l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0) p, Np = period(name) np.savez('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dvc=np.sqrt(vcomb), vc=vc_true.value, r=x.value, rperi=rmin, rapo=rmax, rcur=rcur, r0=r0, ecc=e, l=l, p=p, Np=Np) if ascale: x = x * rmax**-1 #x = x * rcur**-1 # plot 
plt.sca(ax[0]) plt.plot(x, np.sqrt(vcomb), '-', lw=3, color=colors[name], label=labels[name]) #plt.plot(x, vc_true, 'r-') plt.sca(ax[1]) plt.plot(x, np.sqrt(vcomb)/vc_true, '-', lw=3, color=colors[name], label=labels[name]) #plt.plot(x, mcomb, '-', lw=3, color=colors[name], label=labels[name]) plt.sca(ax[0]) if ascale: plt.xlim(0,5) plt.xlabel('r/r$_{apo}$') else: plt.xlabel('r (kpc)') plt.ylabel('$\Delta$ $V_c$ (km s$^{-1}$)') #plt.ylim(0, 100) plt.sca(ax[1]) plt.legend(loc=1, frameon=True, handlelength=1, fontsize='small') if ascale: plt.xlim(0,5) plt.xlabel('r/r$_{apo}$') else: plt.xlabel('r (kpc)') plt.ylabel('$\Delta$ $V_c$ / $V_c$') #plt.ylabel('$\Delta$ $M_{enc}$ ($M_\odot$)') #plt.ylim(0, 1e11) plt.tight_layout() plt.savefig('../plots/vc_r_summary_apo{:d}.pdf'.format(ascale)) def delta_vc_correlations(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, r=False, fast=False, scale=False): """""" pid, dp_fid, vlabel = get_varied_pars(vary) elabel = '' ylabel = 'min ($\Delta$ $V_c$ / $V_c$)' if r: ylabel = 'r(min($\Delta$ $V_c$ / $V_c$)) (kpc)' elabel = 'r' names = get_done() labels = full_names() colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)} plt.close() fig, ax = plt.subplots(2,3,figsize=(15,9)) for name in names: d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) rel_dvc = np.min(d['dvc'] / d['vc']) if r: idmin = np.argmin(d['dvc'] / d['vc']) rel_dvc = d['r'][idmin] mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range']) plt.sca(ax[0][0]) if r: plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5) plt.plot(d['rapo'], rel_dvc, 'o', ms=10, color=colors[name], label=labels[name]) plt.xlabel('$r_{apo}$ (kpc)') plt.ylabel(ylabel) plt.sca(ax[0][1]) #plt.plot(d['rcur']/d['rapo'], rel_dvc, 'o', ms=10, color=colors[name]) if r: plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5) plt.plot(d['rcur'], rel_dvc, 'o', ms=10, color=colors[name]) #plt.plot(d['r0'], rel_dvc, 'ro') plt.xlabel('$r_{current}$') plt.ylabel(ylabel) plt.sca(ax[0][2]) ecc = np.sqrt(1 - (d['rperi']/d['rapo'])**2) ecc = d['ecc'] plt.plot(ecc, rel_dvc, 'o', ms=10, color=colors[name], label=labels[name]) plt.xlabel('Eccentricity') plt.ylabel(ylabel) plt.sca(ax[1][0]) plt.plot(np.median(np.abs(d['l'][:,2])/np.linalg.norm(d['l'], axis=1)), rel_dvc, 'o', ms=10, color=colors[name]) plt.xlabel('|L_z|/|L|') plt.ylabel(ylabel) plt.sca(ax[1][1]) plt.plot(d['Np'], rel_dvc, 'o', ms=10, color=colors[name]) #plt.xlabel('$r_{peri}$ (kpc)') plt.xlabel('Completed periods') plt.ylabel(ylabel) plt.sca(ax[1][2]) plt.plot(dlambda, rel_dvc, 'o', ms=10, color=colors[name]) plt.xlabel('$\Delta$ $\\xi$ (deg)') plt.ylabel(ylabel) plt.sca(ax[0][2]) plt.legend(fontsize='small', handlelength=0.1) plt.tight_layout() plt.savefig('../plots/delta_vc{}_correlations.pdf'.format(elabel)) def collate_orbit(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True): """Store all of the properties on streams""" pid, dp_fid, vlabel = get_varied_pars(vary) names = get_done() N = len(names) Nmax = len(max(names, key=len)) tname = np.chararray(N, itemsize=Nmax) vcmin = np.empty(N) r_vcmin = np.empty(N) Labs = np.empty((N,3)) lx = np.empty(N) ly = np.empty(N) lz = np.empty(N) Lmod = np.empty(N) period = np.empty(N) Nperiod = np.empty(N) ecc = np.empty(N) rperi = np.empty(N) rapo = np.empty(N) rcur = np.empty(N) length = np.empty(N) 
for e, name in enumerate(names[:]): d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) idmin = np.argmin(d['dvc'] / d['vc']) mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range']) tname[e] = name vcmin[e] = (d['dvc'] / d['vc'])[idmin] r_vcmin[e] = d['r'][idmin] if e==0: Nr = np.size(d['r']) dvc = np.empty((N, Nr)) vc = np.empty((N, Nr)) r = np.empty((N, Nr)) dvc[e] = d['dvc'] vc[e] = d['dvc'] / d['vc'] r[e] = d['r'] Labs[e] = np.median(np.abs(d['l']), axis=0) Lmod[e] = np.median(np.linalg.norm(d['l'], axis=1)) lx[e] = np.abs(np.median(d['l'][:,0]/np.linalg.norm(d['l'], axis=1))) ly[e] = np.abs(np.median(d['l'][:,1]/np.linalg.norm(d['l'], axis=1))) lz[e] = np.abs(np.median(d['l'][:,2]/np.linalg.norm(d['l'], axis=1))) period[e] = d['p'] Nperiod[e] = d['Np'] ecc[e] = d['ecc'] rperi[e] = d['rperi'] rapo[e] = d['rapo'] rcur[e] = d['rcur'] length[e] = dlambda t = Table([tname, vcmin, r_vcmin, dvc, vc, r, Labs, Lmod, lx, ly, lz, period, Nperiod, length, ecc, rperi, rapo, rcur], names=('name', 'vcmin', 'rmin', 'dvc', 'vc', 'r', 'Labs', 'Lmod', 'lx', 'ly', 'lz', 'period', 'Nperiod', 'length', 'ecc', 'rperi', 'rapo', 'rcur')) t.pprint() t.write('../data/crb/vc_orbital_summary.fits', overwrite=True) # radial acceleration def ar_r(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, Nsight=1, seed=39): """Calculate precision in radial acceleration as a function of galactocentric radius""" np.random.seed(seed) pid, dp_fid, vlabel = get_varied_pars(vary) components = [c for c in vary if c!='progenitor'] names = get_done() N = len(names) Nmax = len(max(names, key=len)) tname = np.chararray(N, itemsize=Nmax) armin = np.empty((N, Nsight)) r_armin = np.empty((N, Nsight)) Labs = np.empty((N,3)) lx = np.empty(N) ly = np.empty(N) lz = np.empty(N) Lmod = np.empty(N) period_ = np.empty(N) Nperiod = np.empty(N) ecc = np.empty(N) rperi = np.empty(N) rapo = np.empty(N) rcur = np.empty(N) length = np.empty(N) Npix = 300 r = np.linspace(0.1, 200, Npix) dar = np.empty((N, Nsight, Npix)) ar = np.empty((N, Nsight, Npix)) rall = np.empty((N, Nsight, Npix)) plt.close() fig, ax = plt.subplots(1,3, figsize=(15,5)) for e, name in enumerate(names[:]): # read in full inverse CRB for stream modeling fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] cx = stable_inverse(cxi) cq = cx[6:,6:] Npot =
np.shape(cq)
numpy.shape
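The completion above targets numpy.shape. A minimal runnable sketch of the same idiom, using a toy stand-in for the covariance block cq from the prompt (the 4x4 size and the random data are assumptions for illustration only):

import numpy as np

# toy stand-in for the marginal covariance block cq in the prompt
cq = np.cov(np.random.default_rng(0).normal(size=(4, 100)))
Npot = np.shape(cq)[0]  # np.shape returns a tuple, here (4, 4)
assert np.shape(cq) == (Npot, Npot)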
import numpy as np
import numpy.ma as ma
from matplotlib import collections
import toolbox
import pylab
import time
from functools import wraps

def timer(function):
    @wraps(function)
    def function_timer(*args, **kwargs):
        t0 = time.time()
        result = function(*args, **kwargs)
        t1 = time.time()
        print("Total time running %s: %s seconds" % (function.__name__, str(t1 - t0)))
        return result
    return function_timer

def wiggle(frame, scale=1.0):
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    ns = frame['ns'][0]
    nt = frame.size
    scalar = scale*frame.size/(frame.size*0.2)  # scales the trace amplitudes relative to the number of traces
    frame['trace'][:,-1] = np.nan  # set the very last value to nan; a lazy way to prevent wrapping
    vals = frame['trace'].ravel()  # flat view of the 2d array
    vect = np.arange(vals.size).astype(float)  # flat index array, for correctly locating zero crossings in the flat view
    crossing = np.where(np.diff(np.signbit(vals)))[0]  # index before zero crossing
    # use linear interpolation to find the zero crossing, i.e. y = mx + c
    x1 = vals[crossing]
    x2 = vals[crossing+1]
    y1 = vect[crossing]
    y2 = vect[crossing+1]
    m = (y2 - y1)/(x2 - x1)
    c = y1 - m*x1
    # tack these values onto the end of the existing data
    x = np.hstack([vals,
np.zeros_like(c)
numpy.zeros_like
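A self-contained sketch of the zero-crossing idiom that wiggle() builds up to: np.zeros_like(c) supplies the amplitudes (exactly zero) for the interpolated crossing points, whose trace indices are c. Toy data throughout; the argsort restacking at the end is my assumption about what follows the truncated np.hstack call:

import numpy as np

vals = np.array([1.0, 0.5, -0.5, -1.0, 0.25])        # toy trace samples
vect = np.arange(vals.size, dtype=float)             # flat sample index
crossing = np.where(np.diff(np.signbit(vals)))[0]    # index before each sign change
m = (vect[crossing + 1] - vect[crossing]) / (vals[crossing + 1] - vals[crossing])
c = vect[crossing] - m * vals[crossing]              # linear interpolation, y = m*x + c
# the crossing points have amplitude exactly zero, hence zeros_like for the x-values
x = np.hstack([vals, np.zeros_like(c)])
y = np.hstack([vect, c])
order = np.argsort(y)                                # restack in plotting order (assumed)
x, y = x[order], y[order]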
'''
Simple mathematical functions that will be used throughout this package.
Some might be useful outside of this package.
'''
from warnings import warn

import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import AltAz
from astropy.visualization import ZScaleInterval, ImageNormalize
from astropy.io import fits
import ccdproc

__all__ = ["MEDCOMB_KEYS_INT", "SUMCOMB_KEYS_INT", "MEDCOMB_KEYS_FLT32",
           "LACOSMIC_KEYS", "binning", "fitsxy2py", "give_stats",
           "calc_airmass", "airmass_obs", "chk_keyval"]

MEDCOMB_KEYS_INT = dict(dtype='int16',
                        combine_method='median',
                        reject_method=None,
                        unit=u.adu,
                        combine_uncertainty_function=None)

SUMCOMB_KEYS_INT = dict(dtype='int16',
                        combine_method='sum',
                        reject_method=None,
                        unit=u.adu,
                        combine_uncertainty_function=None)

MEDCOMB_KEYS_FLT32 = dict(dtype='float32',
                          combine_method='median',
                          reject_method=None,
                          unit=u.adu,
                          combine_uncertainty_function=None)

# I skipped two params in IRAF LACOSMIC: gain=2.0, readnoise=6.
LACOSMIC_KEYS = dict(sigclip=4.5, sigfrac=0.5, objlim=1.0,
                     satlevel=np.inf, pssl=0.0, niter=4, sepmed=False,
                     cleantype='medmask', fsmode='median', psfmodel='gauss',
                     psffwhm=2.5, psfsize=7, psfk=None, psfbeta=4.765)


def binning(arr, factor_x=1, factor_y=1, binfunc=np.mean, trim_end=False):
    '''Bins the given arr frame.

    Parameters
    ----------
    arr: 2d array
        The array to be binned
    factor_x, factor_y: int
        The binning factors in x, y direction.
    binfunc : function object
        The function to be applied for binning, such as ``np.sum``,
        ``np.mean``, and ``np.median``.
    trim_end: bool
        Whether to trim the end of x, y axes such that binning is done
        without error.
    '''
    binned = arr.copy()
    if trim_end:
        ny, nx = binned.shape
        iy_max = ny - (ny % factor_y)
        ix_max = nx - (nx % factor_x)
        binned = binned[:iy_max, :ix_max]
    ny, nx = binned.shape
    nby = ny // factor_y
    nbx = nx // factor_x
    binned = binned.reshape(nby, factor_y, nbx, factor_x)
    binned = binfunc(binned, axis=(-1, 1))
    return binned


def fitsxy2py(fits_section):
    '''Given FITS section in str, returns the slices in python convention.

    Parameters
    ----------
    fits_section : str
        The section specified by FITS convention, i.e., bracket embraced,
        comma separated, XY order, 1-indexing, and including the end index.

    Note
    ----
    >>> np.eye(5)[fitsxy2py('[1:2,:]')]
    # array([[1., 0.],
    #        [0., 1.],
    #        [0., 0.],
    #        [0., 0.],
    #        [0., 0.]])
    '''
    slicer = ccdproc.utils.slices.slice_from_string
    sl = slicer(fits_section, fits_convention=True)
    return sl


def give_stats(item, extension=0, percentiles=[1, 99], N_extrema=None):
    '''Calculates simple statistics.

    Parameters
    ----------
    item: array-like or path-like
        The nddata or path to a FITS file to be analyzed.
    extension: int, str, optional
        The extension if ``item`` is the path to the FITS file.
    percentiles: list-like, optional
        The percentiles to be calculated.
    N_extrema: int, optional
        The number of low and high elements to be returned when the whole
        data are sorted. If ``None``, it will not be calculated. If ``1``,
        it is identical to min/max values.

    Example
    -------
    >>> bias = CCDData.read("bias_bin11.fits")
    >>> dark = CCDData.read("pdark_300s_27C_bin11.fits")
    >>> percentiles = [0.1, 1, 5, 95, 99, 99.9]
    >>> give_stats(bias, percentiles=percentiles, N_extrema=5)
    >>> give_stats(dark, percentiles=percentiles, N_extrema=5)

    Or just simply

    >>> give_stats("bias_bin11.fits", percentiles=percentiles, N_extrema=5)
    '''
    try:
        hdul = fits.open(item)
        data = hdul[extension].data
        hdul.close()
    except (FileNotFoundError, IndentationError, AttributeError, ValueError):
        data = np.atleast_1d(item)

    result = {}

    d_num = np.size(data)
    d_min = np.min(data)
    d_pct = np.percentile(data, percentiles)
    d_max = np.max(data)
    d_avg = np.mean(data)
    d_med = np.median(data)
    d_std = np.std(data, ddof=1)

    zs = ImageNormalize(data, interval=ZScaleInterval())
    d_zmin = zs.vmin
    d_zmax = zs.vmax

    result["N"] = d_num
    result["min"] = d_min
    result["max"] = d_max
    result["avg"] = d_avg
    result["med"] = d_med
    result["std"] = d_std
    result["percentiles"] = d_pct
    result["zmin"] = d_zmin
    result["zmax"] = d_zmax

    if N_extrema is not None:
        data_flatten = np.sort(data, axis=None)  # axis=None will do flatten.
        d_los = data_flatten[:N_extrema]
        d_his = data_flatten[-1 * N_extrema:]
        result["ext_lo"] = d_los
        result["ext_hi"] = d_his

    return result


def calc_airmass(zd_deg=None, cos_zd=None, scale=750.):
    '''Calculate airmass by nonrefracting radially symmetric atmosphere model.

    Note
    ----
    Wiki:
        https://en.wikipedia.org/wiki/Air_mass_(astronomy)#Nonrefracting_radially_symmetrical_atmosphere
    Identical to the airmass calculation for a given observational run of
    IRAF's asutil.setairmass:
        http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?setairmass

    Parameters
    ----------
    zd_deg : float, optional
        The zenithal distance in degrees
    cos_zd : float, optional
        The cosine of zenithal distance. If given, ``zd_deg`` is not used.
    scale : float, optional
        Earth radius divided by the atmospheric height (usually scale height)
        of the atmosphere. In IRAF documentation, it is mistakenly written
        that this ``scale`` is the "scale height".
    '''
    if zd_deg is None and cos_zd is None:
        raise ValueError("Either zd_deg or cos_zd should not be None.")

    if cos_zd is None:
        cos_zd = np.cos(
np.deg2rad(zd_deg)
numpy.deg2rad
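calc_airmass converts the zenithal distance with np.deg2rad before the truncated np.cos call. A hedged sketch of the full computation: the closed-form expression below is the standard result for a nonrefracting radially symmetric atmosphere (the model the docstring cites), not text recovered from the prompt:

import numpy as np

def calc_airmass_sketch(zd_deg, scale=750.0):
    # convert the zenithal distance to radians before taking the cosine
    cos_zd = np.cos(np.deg2rad(zd_deg))
    # closed-form airmass for a nonrefracting radially symmetric atmosphere
    return np.sqrt((scale * cos_zd) ** 2 + 2 * scale + 1) - scale * cos_zd

print(calc_airmass_sketch(0.0))   # 1.0 at zenith
print(calc_airmass_sketch(60.0))  # ~2.0, close to the plane-parallel sec(z)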
import unittest import numpy as np from polynomials_on_simplices.calculus.error_measures import relative_error from polynomials_on_simplices.geometry.mesh.basic_meshes.triangle_meshes import equilateral_triangle_vertices from polynomials_on_simplices.geometry.primitives.simplex import ( affine_transformation_from_unit, cartesian_to_barycentric_unit, unit) import polynomials_on_simplices.geometry.primitives.triangle as triangle from polynomials_on_simplices.linalg.rigid_motion import move, random_rigid_motion from polynomials_on_simplices.linalg.vector_space_projection import vector_projection from polynomials_on_simplices.probability_theory.uniform_sampling import nsimplex_sampling class TestEdges(unittest.TestCase): def test_2d(self): vertices = unit(2) edges = triangle.edges(vertices) expected_edges = np.array([ [-1.0, 1.0], [0.0, -1.0], [1.0, 0.0] ]) self.assertTrue(np.allclose(expected_edges, edges)) def test_3d(self): vertices = unit(2, 3) edges = triangle.edges(vertices) expected_edges = np.array([ [-1.0, 1.0, 0.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0] ]) self.assertTrue(np.allclose(expected_edges, edges)) def test_diameter(self): vertices = unit(2) d = triangle.diameter(vertices) self.assertEqual(d, np.sqrt(2)) def test_dual_edges_2d(self): vertices = np.random.rand(3, 2) edges = triangle.edges(vertices) dual_edges = triangle.dual_edges(vertices) # Test that dual edges are orthogonal to edges for i in range(3): self.assertTrue(abs(np.dot(edges[i], dual_edges[i])) < 1e-10) # Test that dual edges point out of the triangle by comparing with the vector from the edge midpoint to the # triangle centroid c = triangle.centroid(vertices) for i in range(3): edge_midpoint = 0.5 * (vertices[(i + 1) % 3] + vertices[(i + 2) % 3]) self.assertTrue(np.dot(dual_edges[i], c - edge_midpoint) < 0.0) def test_dual_edges_3d(self): vertices = np.random.rand(3, 3) edges = triangle.edges(vertices) dual_edges = triangle.dual_edges(vertices) # Test that dual edges are orthogonal to edges for i in range(3): self.assertTrue(abs(np.dot(edges[i], dual_edges[i])) < 1e-10) # Test that dual edges point out of the triangle by comparing with the vector from the edge midpoint to the # triangle centroid c = triangle.centroid(vertices) for i in range(3): edge_midpoint = 0.5 * (vertices[(i + 1) % 3] + vertices[(i + 2) % 3]) self.assertTrue(np.dot(dual_edges[i], c - edge_midpoint) < 0.0) class TestBasis(unittest.TestCase): def test_2d(self): p = np.random.rand(3, 2) b = triangle.basis(p) self.assertTrue(abs(np.dot(b[0], b[1])) < 1e-10) self.assertTrue(abs(np.linalg.norm(b[0]) - 1) < 1e-10) self.assertTrue(abs(np.linalg.norm(b[1]) - 1) < 1e-10) def test_3d(self): p = np.random.rand(3, 3) b = triangle.basis(p) self.assertTrue(abs(np.dot(b[0], b[1])) < 1e-10) self.assertTrue(abs(np.linalg.norm(b[0]) - 1) < 1e-10) self.assertTrue(abs(np.linalg.norm(b[1]) - 1) < 1e-10) n1 = np.cross(b[0], b[1]) n2 = triangle.normal(p) self.assertTrue(np.allclose(n1, n2)) class TestArea(unittest.TestCase): def test_unit(self): vertices = unit(2) a = triangle.area(vertices) ea = 0.5 self.assertEqual(ea, a) vertices = unit(2, 3) a = triangle.area(vertices) self.assertEqual(ea, a) def test_arbitrary(self): vertices = np.array([ [0.1, 0.2], [0.7, -0.2], [0.4, 0.5] ]) a = triangle.area(vertices) at, bt = affine_transformation_from_unit(vertices) ea = 0.5 * np.abs(np.linalg.det(at)) self.assertAlmostEqual(ea, a) def test_random(self): vertices = np.random.rand(3, 2) a = triangle.area(vertices) at, bt = affine_transformation_from_unit(vertices) ea = 0.5 * 
np.abs(np.linalg.det(at)) self.assertAlmostEqual(ea, a) # Transform triangle to 3D and move arbitrarily in space vertices = np.concatenate((vertices, np.zeros((3, 1))), axis=1) r, t = random_rigid_motion() vertices = move(r, t, vertices.T).T a = triangle.area(vertices) self.assertAlmostEqual(ea, a) class TestAngle(unittest.TestCase): def test_2d(self): vertices = unit(2) expected_angles = [np.pi / 2, np.pi / 4, np.pi / 4] for i in range(3): a = triangle.angle(vertices, i) self.assertAlmostEqual(expected_angles[i], a) def test_3d(self): vertices = equilateral_triangle_vertices(1.0, 2) for i in range(3): a = triangle.angle(vertices, i) self.assertAlmostEqual(np.pi / 3, a) class TestMedians(unittest.TestCase): def test_2d(self): vertices = unit(2) medians = triangle.medians(vertices) expected_medians = np.array([ [0.5, 0.5], [-1.0, 0.5], [0.5, -1.0] ]) for i in range(3): self.assertTrue(np.allclose(medians[i], expected_medians[i])) def test_3d(self): vertices = unit(2, 3) medians = triangle.medians(vertices) expected_medians = np.array([ [0.5, 0.5, 0.0], [-1.0, 0.5, 0.0], [0.5, -1.0, 0.0] ]) for i in range(3): self.assertTrue(np.allclose(medians[i], expected_medians[i])) def random_nondegenerate_triangle_2d(): """Create a random non degenerate triangle in 2d.""" vertices = np.random.rand(3, 2) while triangle.is_degenerate(vertices): vertices = np.random.rand(3, 2) return vertices class TestCircumcenter(unittest.TestCase): def test_random(self): vertices = random_nondegenerate_triangle_2d() c = triangle.circumcenter(vertices) d = np.empty(3) for i in range(3): d[i] = np.linalg.norm(c - vertices[i]) for i in range(3): self.assertAlmostEqual(d[i], d[(i + 1) % 3]) def test_radius(self): vertices = random_nondegenerate_triangle_2d() c = triangle.circumcenter(vertices) r = triangle.circumradius(vertices) for i in range(3): d = np.linalg.norm(c - vertices[i]) self.assertAlmostEqual(d, r) class TestInCenter(unittest.TestCase): def test_euler_triangle_formula(self): # Verify that Euler's triangle formula holds vertices = random_nondegenerate_triangle_2d() o = triangle.circumcenter(vertices) i = triangle.incenter(vertices) d2 = np.dot(i - o, i - o) R = triangle.circumradius(vertices) r = triangle.inradius(vertices) self.assertTrue(relative_error(d2, R * (R - 2 * r)) < 1e-5) def test_maximum_inscribed_circle(self): vertices = random_nondegenerate_triangle_2d() eps = 1e-10 # Going from the incenter in any direction a distance r - eps, where r is the inradius, we should # always stay inside the triangle v = np.random.rand(2) v /= np.linalg.norm(v) r = triangle.inradius(vertices) ic = triangle.incenter(vertices) p = ic + (r - eps) * v self.assertTrue(triangle.inside_triangle(p, vertices)) # Going from the incenter in the direction of the triangle dual edges a distance r + eps, where r is the # inradius, should take us outside of the triangle for some dual edge. 
Otherwise the maximum inscribed # circle property is not satisfied e = triangle.dual_edges(vertices) p = np.empty((3, 2)) for i in range(3): p[i] = ic + (r + eps) * e[i] / np.linalg.norm(e[i]) self.assertTrue( (not triangle.inside_triangle(p[0], vertices)) or (not triangle.inside_triangle(p[1], vertices)) or (not triangle.inside_triangle(p[2], vertices)) ) class TestAltitudes(unittest.TestCase): def test_orthogonality(self): # Altitude vectors should be orthogonal to the opposite edge vector vertices = np.random.rand(3, 3) av = triangle.altitude_vectors(vertices) ev = triangle.edges(vertices) for i in range(3): self.assertAlmostEqual(np.dot(av[i], ev[i]), 0.0) def test_direction(self): # Altitude vectors should point from a vertex towards the opposite edge vertices = np.random.rand(3, 3) av = triangle.altitude_vectors(vertices) for i in range(3): d1 = np.dot(av[i], vertices[(i + 1) % 3] - vertices[i]) d2 =
np.dot(av[i], vertices[(i + 2) % 3] - vertices[i])
numpy.dot
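The direction test above checks that each altitude vector has positive dot product with both edges leaving its vertex. A standalone sketch with a toy right triangle and a hand-rolled altitude (triangle.altitude_vectors is not available here, so the foot-of-perpendicular construction is my assumption about its behavior):

import numpy as np

vertices = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # toy right triangle
i = 0
edge = vertices[2] - vertices[1]                  # edge opposite vertex i
t = np.dot(vertices[i] - vertices[1], edge) / np.dot(edge, edge)
foot = vertices[1] + t * edge                     # foot of the perpendicular
altitude = foot - vertices[i]                     # altitude vector: vertex -> opposite edge
assert abs(np.dot(altitude, edge)) < 1e-10        # orthogonal to the opposite edge
assert np.dot(altitude, vertices[1] - vertices[i]) > 0  # points toward the edge
assert np.dot(altitude, vertices[2] - vertices[i]) > 0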
import json
import pathlib
import struct

import soundfile
import scipy.signal
import numpy as np

import oddvoices.phonology


def midi_note_to_hertz(midi_note):
    return 440 * 2 ** ((midi_note - 69) / 12)


def seconds_to_timestamp(seconds):
    minutes = int(seconds / 60)
    remaining_seconds = seconds - minutes * 60
    return f"{minutes}:{remaining_seconds:.02f}"


AUTOCORRELATION_WINDOW_SIZE_NUMBER_OF_PERIODS = 8
RANDOMIZED_PHASE_CUTOFF = 3000.0


class CorpusAnalyzer:
    def __init__(self, directory):
        root = pathlib.Path(directory)
        sound_file = root / "audio.wav"
        label_file = root / "labels.txt"
        info_file = root / "database.json"

        with open(info_file) as f:
            info = json.load(f)

        self.expected_f0: float = midi_note_to_hertz(info["f0_midi_note"])
        self.audio: np.array
        self.rate: int
        self.audio, self.rate = soundfile.read(sound_file)
        self.n_randomized_phases = int(RANDOMIZED_PHASE_CUTOFF / self.expected_f0)
        np.random.seed(0)
        self.randomized_phases = np.exp(
np.random.random((self.n_randomized_phases,))
numpy.random.random
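The prompt is truncated inside np.exp(, so only the np.random.random call is certain. A sketch of one common way such randomized unit-magnitude spectral phases are built — the 1j * 2 * np.pi factor and the 261.63 Hz expected f0 are assumptions, not recovered text:

import numpy as np

np.random.seed(0)
n_randomized_phases = int(3000.0 / 261.63)  # RANDOMIZED_PHASE_CUTOFF / expected_f0 (C4, assumed)
# unit-magnitude complex phases; the 1j * 2 * np.pi factor is an assumption,
# since the prompt is cut off right after np.exp(
randomized_phases = np.exp(1j * 2 * np.pi * np.random.random((n_randomized_phases,)))
assert np.allclose(np.abs(randomized_phases), 1.0)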
import os from typing import Optional, List, Tuple, Iterable, Dict, Union, Any import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from flambe.dataset import Dataset from flambe.compile import registrable_factory from flambe.field import Field class DataView: """TabularDataset view for the train, val or test split. This class must be used only internally in the TabularDataset class. A DataView is a lazy Iterable that receives the operations from the TabularDataset object. When __getitem__ is called, then all the fields defined in the transform are applied. This object can cache examples already transformed. To enable this, make sure to use this view under a Singleton pattern (there must only be one DataView per split in the TabularDataset). """ def __init__(self, data: np.ndarray, transform_hooks: List[Tuple[Field, Union[int, List[int]]]], cache: bool) -> None: """ Parameters ---------- data: np.ndarray A 2d numpy array holding the data transform_hooks: List[Tuple[Field, Union[int, List[int]]]] The transformations that will be applied to each example. cache: bool To apply cache or not. """ self.data = data # Stores the raw data self.transform_hooks = transform_hooks self.cache = cache # Caches the transformed data self.cached_data: Dict[int, Any] = {} @property def raw(self): """Returns an subscriptable version of the data""" return self.data def __getitem__(self, index): """ Get an item from an index and apply the transformations dinamically. """ if self.data is None: raise IndexError() if self.cache and index in self.cached_data: return self.cached_data[index] ex = self.data[index] if len(self.transform_hooks) > 0: ret = [] for field, cols in self.transform_hooks: _ex = ex[cols] if isinstance(cols, List): processed_ex = field.process(*_ex) else: processed_ex = field.process(_ex) if isinstance(processed_ex, tuple): ret.extend(processed_ex) else: ret.append(processed_ex) ret = tuple(ret) else: ret = tuple(ex) if self.cache: self.cached_data[index] = ret return ret def is_empty(self) -> bool: """ Return if the DataView has data """ return len(self) == 0 def cols(self) -> int: """ Return the amount of columns the DataView has.""" if self.is_empty(): raise ValueError("Empty DataView contains no columns") return len(self[0]) def __len__(self) -> int: """ Return the length of the dataview, ie the amount of examples it contains. """ if self.data is None: return 0 return len(self.data) def __setitem__(self): """Raise an error as DataViews are immutable.""" raise ValueError("Dataset objects are immutable") def __delitem__(self): """Raise an error as DataViews are immutable.""" raise ValueError("Dataset objects are immutable") class TabularDataset(Dataset): """Loader for tabular data, usually in `csv` or `tsv` format. A TabularDataset can represent any data that can be organized in a table. Internally, we store all information in a 2D numpy generic array. This object also behaves as a sequence over the whole dataset, chaining the training, validation and test data, in that order. This is useful in creating vocabularies or loading embeddings over the full datasets. 
Attributes ---------- train: np.ndarray The list of training examples val: np.ndarray The list of validation examples test: np.ndarray The list of text examples """ def __init__(self, train: Iterable[Iterable], val: Optional[Iterable[Iterable]] = None, test: Optional[Iterable[Iterable]] = None, cache: bool = True, named_columns: Optional[List[str]] = None, transform: Dict[str, Union[Field, Dict]] = None) -> None: """Initialize the TabularDataset. Parameters ---------- train: Iterable[Iterable] The train data val: Iterable[Iterable], optional The val data, optional test: Iterable[Iterable], optional The test data, optional cache: bool Whether to cache fetched examples. Only use True if the dataset fits in memory. Defaults to False. named_columns: Optional[List[Union[str, int]]] The columns' names of the dataset, in order. transform: Dict[str, Dict[str, Any]] The fields to be applied to the columns. Each field is identified with a name for easy linking. For example: { 'text': {'field': SomeField(), 'columns': [0, 1]}, 'label': {'field': SomeOtherField(), 'columns': 2} } """ self._train = np.array(train, dtype=np.object) self._val = None self._test = None if val is not None: self._val = np.array(val, dtype=np.object) if test is not None: self._test = np.array(test, dtype=np.object) self.cache = cache self.named_columns = named_columns cols = [] # All datasets should be 2-dimensional for k, d in {"val": self._val, "test": self._test, "train": self._train}.items(): if d is not None: cols.append(d.shape[-1]) if len(d.shape) != 2: # This happens when examples differ in the amount of # columns and numpy stores them in a 1-D tensor # (with tuples as values) raise ValueError( f"{k} dataset contains examples with different amount of columns" ) # Check that all splits contain same columns if np.unique(cols).shape != (1,): raise ValueError("All splits containing data should have same amount of columns") if named_columns and len(named_columns) != cols[0]: raise ValueError("Columns parameter should have same size as the dataset's amount " + " of columns") # Store the hooks for lazy loading self.transform_hooks: List[Tuple[Field, Union[int, List[int]]]] = [] self.transform = transform if transform: self._set_transforms(transform) self.train_view: Optional[DataView] = None self.val_view: Optional[DataView] = None self.test_view: Optional[DataView] = None def _set_transforms(self, transform: Dict[str, Union[Field, Dict]]) -> None: """Set transformations attributes and hooks to the data splits. This method adds attributes for each field in the transform dict. It also adds hooks for the 'process' call in each field. ATTENTION: This method works with the _train, _val and _test hidden attributes as this runs in the constructor and creates the hooks to be used in creating the properties. """ columns: Union[int, List[int]] for k, t in enumerate(transform.items()): name, value = t if isinstance(value, Field): field = value columns = k else: try: field, tmp_cols = value['field'], value.get('columns', k) # Process as list to avoid repeating code if not isinstance(tmp_cols, List): tmp_cols = [tmp_cols] for i, c in enumerate(tmp_cols[:]): if isinstance(c, str): if not self.named_columns: raise ValueError( "Columns parameter is required for str-based indexing" ) try: tmp_cols[i] = self.named_columns.index(c) except ValueError: raise ValueError( f"Dataset has no column name {c}. 
" + f"Available columns: {self.named_columns}" ) columns = tmp_cols # If it was a value originally then process # it as a single value if len(tmp_cols) == 1: columns = tmp_cols[0] except KeyError: raise ValueError( f"If a dict is provided in 'transform', then it must have the 'field' key." f" transform item = {k, t}" ) setattr(self, name, field) args = [self._train[:, columns]] if self._val is not None: args.append(self._val[:, columns]) if self._test is not None: args.append(self._test[:, columns]) field.setup(*args) self.transform_hooks.append((field, columns)) @registrable_factory @classmethod def from_path(cls, train_path: str, val_path: Optional[str] = None, test_path: Optional[str] = None, sep: Optional[str] = '\t', header: Optional[str] = 'infer', columns: Optional[Union[List[str], List[int]]] = None, encoding: Optional[str] = 'utf-8', transform: Dict[str, Union[Field, Dict]] = None) -> 'TabularDataset': """Load a TabularDataset from the given file paths. Parameters ---------- train_path : str The path to the train data val_path : str, optional The path to the optional validation data test_path : str, optional The path to the optional test data sep: str Separator to pass to the `read_csv` method header: Optional[Union[str, int]] Use 0 for first line, None for no headers, and 'infer' to detect it automatically, defaults to 'infer' columns: List[str] List of columns to load, can be used to select a subset of columns, or change their order at loading time encoding: str The encoding format passed to the pandas reader transform: Dict[str, Union[Field, Dict]] The fields to be applied to the columns. Each field is identified with a name for easy linking. """ if ( columns and any(isinstance(c, int) for c in columns) and any(isinstance(c, str) for c in columns) ): raise ValueError("Columns parameters need to be all string or all integers.") train, cols = cls._load_file(train_path, sep, header, columns, encoding) val, test = None, None if val_path is not None: val, _ = cls._load_file(val_path, sep, header, columns, encoding) if test_path is not None: test, _ = cls._load_file(test_path, sep, header, columns, encoding) return cls(train=train, val=val, test=test, transform=transform, named_columns=cols) @registrable_factory @classmethod def autogen(cls, data_path: str, test_path: Optional[str] = None, seed: Optional[int] = None, test_ratio: Optional[float] = 0.2, val_ratio: Optional[float] = 0.2, sep: Optional[str] = '\t', header: Optional[str] = 'infer', columns: Optional[Union[List[str], List[int]]] = None, encoding: Optional[str] = 'utf-8', transform: Dict[str, Union[Field, Dict]] = None) -> 'TabularDataset': """Generate a test and validation set from the given file paths, then load a TabularDataset. Parameters ---------- data_path: str The path to the data test_path: Optional[str] The path to the test data seed: Optional[int] Random seed to be used in test/val generation test_ratio: Optional[float] The ratio of the test dataset in relation to the whole dataset. If `test_path` is specified, this field has no effect. 
val_ratio: Optional[float] The ratio of the validation dataset in relation to the training dataset (whole - test) sep: str Separator to pass to the `read_csv` method header: Optional[Union[str, int]] Use 0 for first line, None for no headers, and 'infer' to detect it automatically, defaults to 'infer' columns: List[str] List of columns to load, can be used to select a subset of columns, or change their order at loading time encoding: str The encoding format passed to the pandas reader transform: Dict[str, Union[Field, Dict]] The fields to be applied to the columns. Each field is identified with a name for easy linking. """ if ( columns and any(isinstance(c, int) for c in columns) and any(isinstance(c, str) for c in columns) ): raise ValueError("Columns parameters need to be all string or all integers.") data, cols = cls._load_file(data_path, sep=sep, header=header, columns=columns, encoding=encoding) train, val, test = None, None, None if test_path is not None: train, val = train_test_split(data, test_size=val_ratio, random_state=seed) test, _ = cls._load_file(test_path, sep=sep, header=header, columns=columns, encoding=encoding) else: train_val, test = train_test_split(data, test_size=test_ratio, random_state=seed) train, val = train_test_split(train_val, test_size=val_ratio, random_state=seed) return cls(train=train, val=val, test=test, transform=transform, named_columns=cols) @classmethod def _load_file(cls, path: str, sep: Optional[str] = '\t', header: Optional[str] = 'infer', columns: Optional[Union[List[str], List[int]]] = None, encoding: Optional[str] = 'utf-8') -> Tuple[List[Tuple], Optional[List[str]]]: """Load data from the given path. The path may be either a single file or a directory. If it is a directory, each file is loaded according to the specified options and all the data is concatenated into a single list. The files will be processed in order based on file name. Parameters ---------- path : str Path to data, could be a directory or a file sep: str Separator to pass to the `read_csv` method header: Optional[Union[str, int]] Use 0 for first line, None for no headers, and 'infer' to detect it automatically, defaults to 'infer' columns: Optional[Union[List[str], List[int]]] List of columns to load, can be used to select a subset of columns, or change their order at loading time encoding: str The encoding format passed to the pandas reader Returns ------- Tuple[List[Tuple], Optional[List[str]]] A tuple containing the list of examples (where each example is itself also a list or tuple of entries in the dataset) and an optional list of named columns (one string for each column in the dataset) """ # Get all paths if os.path.isdir(path): file_paths = [os.path.join(path, name) for name in os.listdir(path)] file_paths = sorted(file_paths) else: file_paths = [path] data: List = [] for file_path in file_paths: # Don't fail on buggy files try: examples = pd.read_csv(file_path, sep=sep, header=header, index_col=False, dtype=str, encoding=encoding, keep_default_na=False) # Select columns if columns is not None: examples = examples[columns] data.extend(examples.values.tolist()) except Exception as e: print("Warning: failed to load file {file_path}") print(e) if len(data) == 0: raise ValueError(f"No data found at {path}") # Take the named columns from the columns parameter # if they are strings or try to use the pd.DataFrame # column names if they are strings. 
named_cols: List[str] = [] if columns: for i, c in enumerate(columns): # type: ignore if isinstance(c, str): named_cols.append(c) elif all(isinstance(c, str) for c in examples.columns): named_cols = examples.columns.tolist() return data, named_cols if len(named_cols) > 0 else None @property def train(self) -> np.ndarray: """Returns the training data as a numpy nd array""" if self.train_view is None: self.train_view = DataView(self._train, self.transform_hooks, self.cache) return self.train_view @property def val(self) -> np.ndarray: """Returns the validation data as a numpy nd array""" if self.val_view is None: self.val_view = DataView(self._val, self.transform_hooks, self.cache) return self.val_view @property def test(self) -> np.ndarray: """Returns the test data as a numpy nd array""" if self.test_view is None: self.test_view = DataView(self._test, self.transform_hooks, self.cache) return self.test_view @property def raw(self) -> np.ndarray: """Returns all partitions of the data as a numpy nd array""" args = [self._train] if not self.val.is_empty(): args.append(self.val.raw) if not self.test.is_empty(): args.append(self.test.raw) return
np.concatenate(args, axis=0)
numpy.concatenate
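TabularDataset.raw stacks whichever splits are non-empty row-wise. A minimal sketch with toy object-dtype splits, mirroring the np.concatenate(args, axis=0) completion:

import numpy as np

train = np.array([["a", 0], ["b", 1]], dtype=object)
val = np.array([["c", 0]], dtype=object)
test = np.array([["d", 1]], dtype=object)

# row-wise stack of all non-empty splits, as in TabularDataset.raw
full = np.concatenate([train, val, test], axis=0)
assert full.shape == (4, 2)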
import numpy as np

import diversipy


def test_sample():
    X = diversipy.simplex.sample(dimension=3, n_points=1000)
    # points sum up to 1
    assert np.allclose(X.sum(axis=1), 1)
    # points are bounded in each dimension by 0 <= x_i <= 1
    assert np.min(X) >= 0
    assert
np.min(X)
numpy.min
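A dependency-free sketch of the same bounds check: normalized exponentials are one standard way to draw uniform points on a simplex (whether diversipy.simplex.sample uses this construction is an assumption), and np.min / np.max give the global bounds:

import numpy as np

rng = np.random.default_rng(0)
e = rng.exponential(size=(1000, 3))
X = e / e.sum(axis=1, keepdims=True)  # uniform points on the 2-simplex

assert np.allclose(X.sum(axis=1), 1)
assert np.min(X) >= 0
assert np.max(X) <= 1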
from abc import ABCMeta, abstractmethod

import numpy as np


# metaclass=ABCMeta replaces the Python-2-only __metaclass__ attribute,
# which has no effect in Python 3 and would leave the methods non-abstract
class Activation(metaclass=ABCMeta):

    @abstractmethod
    def response(self, x):
        raise NotImplementedError()

    @abstractmethod
    def gradient(self, x):
        raise NotImplementedError()


class Sigmoid(Activation):

    def response(self, x):
        return 1. / (1. + np.exp(-x))

    def gradient(self, x):
        y = self.response(x)
        return y * (1. - y)


class Linear(Activation):

    def response(self, x):
        return x

    def gradient(self, x):
        return 1.


class Relu(Activation):

    def response(self, x):
        return np.maximum(0, x)

    def gradient(self, x):
        return 1. * (x > 0)


class LeakyRelu(Activation):

    def response(self, x):
        return
np.maximum(0.01, x)
numpy.maximum
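Note that np.maximum(0.01, x) clamps every response below 0.01 rather than scaling negative inputs; the conventional leaky ReLU compares against 0.01 * x. A hedged corrected sketch with a matching gradient (that this was the original file's intent is an assumption):

import numpy as np

def leaky_relu(x, alpha=0.01):
    # conventional leaky ReLU: identity for x > 0, alpha * x otherwise
    return np.maximum(alpha * x, x)

def leaky_relu_grad(x, alpha=0.01):
    return np.where(x > 0, 1.0, alpha)

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(leaky_relu(x))       # [-0.02  -0.005  0.     1.5  ]
print(leaky_relu_grad(x))  # [0.01 0.01 0.01 1.  ]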
#%% import sys import os os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory sys.path.append('/Users/mwinding/repos/maggot_models') from pymaid_creds import url, name, password, token import pymaid rm = pymaid.CatmaidInstance(url, token, name, password) import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd # allows text to be editable in Illustrator plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 # font settings plt.rcParams['font.size'] = 5 plt.rcParams['font.family'] = 'arial' from src.data import load_metagraph from src.visualization import CLASS_COLOR_DICT, adjplot from src.traverse import Cascade, to_transmission_matrix from src.traverse import TraverseDispatcher from src.visualization import matrixplot import connectome_tools.cascade_analysis as casc import connectome_tools.celltype as ct import connectome_tools.process_matrix as pm adj_ad = pm.Promat.pull_adj(type_adj='ad', subgraph='brain') #%% # pull sensory annotations and then pull associated skids order = ['olfactory', 'gustatory-external', 'gustatory-pharyngeal', 'enteric', 'thermo-warm', 'thermo-cold', 'visual', 'noci', 'mechano-Ch', 'mechano-II/III', 'proprio', 'respiratory'] sens = [ct.Celltype(name, ct.Celltype_Analyzer.get_skids_from_meta_annotation(f'mw {name}')) for name in order] input_skids_list = [x.get_skids() for x in sens] input_skids = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities') output_names = pymaid.get_annotated('mw brain outputs').name output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name)) output_skids = [val for sublist in output_skids_list for val in sublist] #%% # cascades from each sensory modality import pickle p = 0.05 max_hops = 10 n_init = 1000 simultaneous = True adj=adj_ad ''' input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, source_names = order, stop_skids=output_skids, adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous) pickle.dump(input_hit_hist_list, open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'wb')) ''' input_hit_hist_list = pickle.load(open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'rb')) # %% # plot sensory cascades raw fig, axs = plt.subplots(len(input_hit_hist_list), 1, figsize=(10, 20)) fig.tight_layout(pad=2.0) for i, hit_hist in enumerate(input_hit_hist_list): ax = axs[i] sns.heatmap(hit_hist.skid_hit_hist, ax=ax) ax.set_xlabel(hit_hist.get_name()) plt.savefig('cascades/plots/sensory_modality_signals.pdf', format='pdf', bbox_inches='tight') os.system('say "code executed"') # %% # how close are descending neurons to sensory? 
# load output types dVNC = pymaid.get_skids_by_annotation('mw dVNC') dSEZ = pymaid.get_skids_by_annotation('mw dSEZ') RGN = pymaid.get_skids_by_annotation('mw RGN') # generate Cascade_Analyzer objects containing name of pathway and the hit_hist to each output type dVNC_hits = [casc.Cascade_Analyzer(f'{hit_hist.get_name()}-dVNC', hit_hist.skid_hit_hist.loc[dVNC, :]) for hit_hist in input_hit_hist_list] dSEZ_hits = [casc.Cascade_Analyzer(f'{hit_hist.get_name()}-dSEZ', hit_hist.skid_hit_hist.loc[dSEZ, :]) for hit_hist in input_hit_hist_list] RGN_hits = [casc.Cascade_Analyzer(f'{hit_hist.get_name()}-RGN', hit_hist.skid_hit_hist.loc[RGN, :]) for hit_hist in input_hit_hist_list] dVNC_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dVNC'], hit_hist.skid_hit_hist.loc[dVNC, :]) for hit_hist in input_hit_hist_list] dSEZ_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dSEZ'], hit_hist.skid_hit_hist.loc[dSEZ, :]) for hit_hist in input_hit_hist_list] RGN_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'RGN'], hit_hist.skid_hit_hist.loc[RGN, :]) for hit_hist in input_hit_hist_list] # max possible hits that all output neuron types could receive max_dVNC_hits = len(dVNC_hits[0].skid_hit_hist.index)*n_init max_dSEZ_hits = len(dVNC_hits[0].skid_hit_hist.index)*n_init max_RGN_hits = len(dVNC_hits[0].skid_hit_hist.index)*n_init # organize data so that each sens -> dVNC, dSEZ, RGN is intercalated sens_output_data = list(zip(dVNC_hits, dSEZ_hits, RGN_hits)) sens_output_data = [x for sublist in sens_output_data for x in sublist] sens_output_df = pd.DataFrame([x.skid_hit_hist.sum(axis=0) for x in sens_output_data]) # set up multiindex sens_output_df['source']=[x.get_name()[0] for x in sens_output_data] sens_output_df['target']=[x.get_name()[1] for x in sens_output_data] sens_output_df = sens_output_df.set_index(['source', 'target']) # normalize by max possible input to each output type (num neurons * n_init) sens_output_df_plot = sens_output_df.copy() sens_output_df_plot.loc[(slice(None), 'dVNC'), :] = sens_output_df_plot.loc[(slice(None), 'dVNC'), :]/max_dVNC_hits sens_output_df_plot.loc[(slice(None), 'dSEZ'), :] = sens_output_df_plot.loc[(slice(None), 'dSEZ'), :]/max_dSEZ_hits sens_output_df_plot.loc[(slice(None), 'RGN'), :] = sens_output_df_plot.loc[(slice(None), 'RGN'), :]/max_RGN_hits import cmasher as cmr fig, ax = plt.subplots(1, 1, figsize=(1.5, 2)) fig.tight_layout(pad=3.0) vmax = 0.35 cmap = cmr.torch sns.heatmap(sens_output_df_plot, ax = ax, cmap = cmap, vmax=vmax) ax.set_title('Signal to brain outputs') ax.set(xlim = (0, 11)) plt.savefig('cascades/plots/sensory_modality_signals_to_output.pdf', format='pdf', bbox_inches='tight') # determine mean/median hop distance from sens -> output def counts_to_list(count_list): expanded_counts = [] for i, count in enumerate(count_list): expanded = np.repeat(i, count) expanded_counts.append(expanded) return([x for sublist in expanded_counts for x in sublist]) all_sens_output_dist = [] for row in sens_output_df.iterrows(): list_hits = counts_to_list(row[1]) all_sens_output_dist.append([row[0][0], row[0][1], np.mean(list_hits), np.median(list_hits)]) all_sens_output_dist = pd.DataFrame(all_sens_output_dist, columns = ['source', 'target', 'mean_hop', 'median_hop']) # %% # plotting visits by modality to each descending to VNC neuron pair # supplemental figure dVNC_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dVNC_hits] dVNC_hits_summed = pd.concat(dVNC_hits_summed, axis=1) dVNC_hits_pairwise = 
pm.Promat.convert_df_to_pairwise(dVNC_hits_summed) dSEZ_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dSEZ_hits] dSEZ_hits_summed = pd.concat(dSEZ_hits_summed, axis=1) dSEZ_hits_pairwise = pm.Promat.convert_df_to_pairwise(dSEZ_hits_summed) RGN_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in RGN_hits] RGN_hits_summed = pd.concat(RGN_hits_summed, axis=1) RGN_hits_pairwise = pm.Promat.convert_df_to_pairwise(RGN_hits_summed) fig, axs = plt.subplots( 3, 1, figsize=(8, 8) ) fig.tight_layout(pad=3.0) ax = axs[0] ax.get_xaxis().set_visible(False) ax.set_title('Signal to Individual VNC Descending Neurons') sns.heatmap(dVNC_hits_pairwise.T, ax = ax) ax = axs[1] ax.get_xaxis().set_visible(False) ax.set_title('Signal to Individual SEZ Descending Neurons') sns.heatmap(dSEZ_hits_pairwise.T, ax = ax) ax = axs[2] ax.set_xlabel('Individual Ring Gland Neurons') ax.get_xaxis().set_visible(False) ax.set_title('Signal to Individual Ring Gland Neurons') sns.heatmap(RGN_hits_pairwise.T, ax = ax) plt.savefig('cascades/plots/signal_to_individual_outputs.pdf', format='pdf', bbox_inches='tight') #%% # alternative clustermap plot of descending neurons # supplemental figure plot vmax = n_init fig = sns.clustermap(dVNC_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax) ax = fig.ax_heatmap ax.set_xlabel('Individual dVNCs') ax.set_xticks([]) fig.savefig('cascades/plots/signal_to_individual_dVNCs.pdf', format='pdf', bbox_inches='tight') fig = sns.clustermap(dSEZ_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax) ax = fig.ax_heatmap ax.set_xlabel('Individual dSEZs') ax.set_xticks([]) fig.savefig('cascades/plots/signal_to_individual_dSEZs.pdf', format='pdf', bbox_inches='tight') fig = sns.clustermap(RGN_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax) ax = fig.ax_heatmap ax.set_xlabel('Individual RG neurons') ax.set_xticks([]) fig.savefig('cascades/plots/signal_to_individual_RGs.pdf', format='pdf', bbox_inches='tight') # %% # distribution summary of signal to output neurons dVNC_dist = (dVNC_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1) dSEZ_dist = (dSEZ_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1) RGN_dist = (RGN_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1) dist_data = pd.DataFrame(list(zip(dVNC_dist.values, ['dVNC']*len(dVNC_dist))) + list(zip(dSEZ_dist.values, ['dSEZ']*len(dSEZ_dist))) + list(zip(RGN_dist.values, ['RGN']*len(RGN_dist))), columns = ['combinations', 'type']) fig, ax = plt.subplots(1,1, figsize=(4,4)) sns.stripplot(data = dist_data, y = 'combinations', x='type', s=1, ax=ax) fig.savefig('cascades/plots/signal_to_outputs_dist.pdf', format='pdf', bbox_inches='tight') fig, ax = plt.subplots(1,1, figsize=(4,4)) sns.histplot(data = dVNC_dist-0.5, ax=ax, bins=len(sens)) fig.savefig('cascades/plots/signal_to_dVNC_dist.pdf', format='pdf', bbox_inches='tight') fig, ax = plt.subplots(1,1, figsize=(4,4)) sns.histplot(data = dSEZ_dist-0.5, ax=ax, bins=len(sens)) fig.savefig('cascades/plots/signal_to_dSEZ_dist.pdf', format='pdf', bbox_inches='tight') fig, ax = plt.subplots(1,1, figsize=(4,4)) sns.histplot(data = RGN_dist-0.5, ax=ax, bins=len(sens)) fig.savefig('cascades/plots/signal_to_RGN_dist.pdf', format='pdf', bbox_inches='tight') # %% # parallel coordinates plots from pandas.plotting import parallel_coordinates linewidth = 0.75 alpha = 0.8 very_low_color = '#D7DF23' low_color = '#C2DD26' med_color = 
'#8DC63F' high_color = '#00A651' data = dVNC_hits_pairwise.groupby('pair_id').sum() very_low = (dVNC_dist<=1) low = (dVNC_dist>1) & (dVNC_dist<4) med = (dVNC_dist>=4) & (dVNC_dist<8) high = dVNC_dist>=8 data['type'] = [0]*len(data.index) data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type']) data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type']) data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type']) data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type']) data = data.sort_values(by='type') fig, ax = plt.subplots(1,1, figsize=(4,4)) parallel_coordinates(data, class_column='type', color = [high_color, med_color, low_color, very_low_color], alpha=alpha, linewidth=linewidth) fig.savefig('cascades/plots/signal-to-dVNC_parallel-coordinates.pdf', format='pdf', bbox_inches='tight') data = dSEZ_hits_pairwise.groupby('pair_id').sum() very_low = (dSEZ_dist<=1) low = (dSEZ_dist>1) & (dSEZ_dist<4) med = (dSEZ_dist>=4) & (dSEZ_dist<8) high = dSEZ_dist>=8 data['type'] = [0]*len(data.index) data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type']) data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type']) data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type']) data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type']) data = data.sort_values(by='type') fig, ax = plt.subplots(1,1, figsize=(4,4)) parallel_coordinates(data, class_column='type', color = [high_color, low_color, med_color, very_low_color], alpha=alpha, linewidth=linewidth) fig.savefig('cascades/plots/signal-to-dSEZ_parallel-coordinates.pdf', format='pdf', bbox_inches='tight') data = RGN_hits_pairwise.groupby('pair_id').sum() very_low = (RGN_dist<=1) low = (RGN_dist>1) & (RGN_dist<4) med = (RGN_dist>=4) & (RGN_dist<8) high = RGN_dist>=8 data['type'] = [0]*len(data.index) data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type']) data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type']) data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type']) data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type']) data = data.sort_values(by='type') fig, ax = plt.subplots(1,1, figsize=(4,4)) parallel_coordinates(data, class_column='type', color = [high_color, low_color, very_low_color, med_color], alpha=alpha, linewidth=linewidth) fig.savefig('cascades/plots/signal-to-RGN_parallel-coordinates.pdf', format='pdf', bbox_inches='tight') # %% # PCA of descending input from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA data = dVNC_hits_pairwise.groupby('pair_id').sum() data['type'] = ['dVNC']*len(data) data2 = dSEZ_hits_pairwise.groupby('pair_id').sum() data2['type'] = ['dSEZ']*len(data2) data3 = RGN_hits_pairwise.groupby('pair_id').sum() data3['type'] = ['RGN']*len(data3) data = pd.concat([data, data2, data3]) x = data.drop(columns='type').values x = StandardScaler().fit_transform(x) pca = PCA(n_components=2) principalComponents = pca.fit_transform(x) principalDf = pd.DataFrame(data = principalComponents , columns = ['pc1', 'pc2'], index=data.index) principalDf['type'] = data['type'] ylim = (-2.25, 2.25) xlim = (-5, 6) size = 3 alpha = 0.75 # plot dVNC PCA plot_data = principalDf[principalDf.type=='dVNC'] low = (dVNC_dist<4) med = (dVNC_dist>=4) & (dVNC_dist<10) high = dVNC_dist>=10 plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type']) plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type']) plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type']) fig, ax = plt.subplots(1,1,figsize=(2,2)) sns.scatterplot(data = 
plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax) ax.set(xlim=xlim, ylim=ylim) fig.savefig('cascades/plots/signal-to-dVNC_PCA.pdf', format='pdf', bbox_inches='tight') # plot dSEZ PCA plot_data = principalDf[principalDf.type=='dSEZ'] low = (dSEZ_dist<4) med = (dSEZ_dist>=4) & (dSEZ_dist<10) high = dSEZ_dist>=10 plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type']) plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type']) plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type']) fig, ax = plt.subplots(1,1,figsize=(2,2)) sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax) ax.set(xlim=xlim, ylim=ylim) fig.savefig('cascades/plots/signal-to-dSEZ_PCA.pdf', format='pdf', bbox_inches='tight') # plot RGN PCA plot_data = principalDf[principalDf.type=='RGN'] low = (RGN_dist<4) med = (RGN_dist>=4) & (RGN_dist<10) high = RGN_dist>=10 plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type']) plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type']) plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type']) fig, ax = plt.subplots(1,1,figsize=(2,2)) sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax) ax.set(xlim=xlim, ylim=ylim) fig.savefig('cascades/plots/signal-to-RGN_PCA.pdf', format='pdf', bbox_inches='tight') # %% # bar plot of high, med, low categories for each type of output integration_data = [['dVNC', 'high', sum(dVNC_dist>=10)], ['dVNC', 'med', sum((dVNC_dist>=4) & (dVNC_dist<10))], ['dVNC', 'low', sum(dVNC_dist<4)], ['dSEZ', 'high', sum(dSEZ_dist>=10)], ['dSEZ', 'med', sum((dSEZ_dist>=4) & (dSEZ_dist<10))], ['dSEZ', 'low', sum(dSEZ_dist<4)], ['RGN', 'high', sum(RGN_dist>=10)], ['RGN', 'med', sum((RGN_dist>=4) & (RGN_dist<10))], ['RGN', 'low', sum(RGN_dist<4)]] integration_data = pd.DataFrame(integration_data, columns = ['class', 'type', 'count']) fig, ax = plt.subplots(1,1,figsize=(2,2)) sns.barplot(data = integration_data, x='class', y='count', hue='type', hue_order = ['high', 'med', 'low'], ax=ax) fig.savefig('cascades/plots/signal-integration-counts_dVNCs.pdf', format='pdf', bbox_inches='tight') # %% ########## # **** Note Well: REALLY old code below, deprecated or never used in paper **** ########## # %% # num of descendings at each level # this assumes that thresholding per node is useful; it might not be threshold = 50 num_dVNC_dsSens = pd.DataFrame(([np.array(dVNC_ORN_hit>threshold).sum(axis = 0), np.array(dVNC_AN_hit>threshold).sum(axis = 0), np.array(dVNC_MN_hit>threshold).sum(axis = 0), np.array(dVNC_A00c_hit>threshold).sum(axis = 0), np.array(dVNC_vtd_hit>threshold).sum(axis = 0), np.array(dVNC_thermo_hit>threshold).sum(axis = 0), np.array(dVNC_photo_hit>threshold).sum(axis = 0)]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) num_dSEZ_dsSens = pd.DataFrame(([np.array(dSEZ_ORN_hit>threshold).sum(axis = 0), np.array(dSEZ_AN_hit>threshold).sum(axis = 0), np.array(dSEZ_MN_hit>threshold).sum(axis = 0), np.array(dSEZ_A00c_hit>threshold).sum(axis = 0), np.array(dSEZ_vtd_hit>threshold).sum(axis = 0), np.array(dSEZ_thermo_hit>threshold).sum(axis = 0), np.array(dSEZ_photo_hit>threshold).sum(axis = 0)]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) num_RG_dsSens = pd.DataFrame(([np.array(RG_ORN_hit>threshold).sum(axis = 0), 
np.array(RG_AN_hit>threshold).sum(axis = 0), np.array(RG_MN_hit>threshold).sum(axis = 0), np.array(RG_A00c_hit>threshold).sum(axis = 0), np.array(RG_vtd_hit>threshold).sum(axis = 0), np.array(RG_thermo_hit>threshold).sum(axis = 0), np.array(RG_photo_hit>threshold).sum(axis = 0)]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) fig, axs = plt.subplots( 3, 1, figsize=(8, 8) ) fig.tight_layout(pad=3.0) vmax = 50 cmap = cmr.heat ax = axs[0] ax.set_title('Number of VNC Descending Neurons downstream of Sensory Signal') sns.heatmap(num_dVNC_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap) ax.set(xlim = (0, 13)) ax = axs[1] ax.set_title('Number of SEZ Descending Neurons downstream of Sensory Signal') sns.heatmap(num_dSEZ_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap) ax.set(xlim = (0, 13)) ax = axs[2] ax.set_title('Number of Ring Gland Neurons downstream of Sensory Signal') sns.heatmap(num_RG_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap) ax.set_xlabel('Hops from sensory') ax.set(xlim = (0, 13)) plt.savefig('cascades/plots/number_outputs_ds_each_sensory_modality.pdf', format='pdf', bbox_inches='tight') # %% # When modality are each outputs associated with? dVNC_hits = pd.DataFrame(([ dVNC_skids, dVNC_ORN_hit.sum(axis = 1), dVNC_AN_hit.sum(axis = 1), dVNC_MN_hit.sum(axis = 1), dVNC_thermo_hit.sum(axis = 1), dVNC_photo_hit.sum(axis = 1), dVNC_A00c_hit.sum(axis = 1), dVNC_vtd_hit.sum(axis = 1)]), index = ['dVNC_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd']) dVNC_hits = dVNC_hits.T dSEZ_hits = pd.DataFrame(([ dSEZ_skids, dSEZ_ORN_hit.sum(axis = 1), dSEZ_AN_hit.sum(axis = 1), dSEZ_MN_hit.sum(axis = 1), dSEZ_thermo_hit.sum(axis = 1), dSEZ_photo_hit.sum(axis = 1), dSEZ_A00c_hit.sum(axis = 1), dSEZ_vtd_hit.sum(axis = 1)]), index = ['dSEZ_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd']) dSEZ_hits = dSEZ_hits.T RG_hits = pd.DataFrame(([ RG_skids, RG_ORN_hit.sum(axis = 1), RG_AN_hit.sum(axis = 1), RG_MN_hit.sum(axis = 1), RG_thermo_hit.sum(axis = 1), RG_photo_hit.sum(axis = 1), RG_A00c_hit.sum(axis = 1), RG_vtd_hit.sum(axis = 1)]), index = ['RG_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd']) RG_hits = RG_hits.T # %% # sensory characterization of each layer of each sensory modality import plotly.express as px from pandas.plotting import parallel_coordinates # replacement if I want to use this later #sensory_profiles = [hit_hist.skid_hit_hist.sum(axis=1).values for hit_hist in input_hit_hist_list] #sensory_profiles = pd.DataFrame(sensory_profiles, index=[hit_hist.get_name() for hit_hist in input_hit_hist_list], columns = input_hit_hist_list[0].skid_hit_hist.index) sensory_profile = pd.DataFrame(([ORN_hit_hist.sum(axis = 1), AN_hit_hist.sum(axis = 1), MN_hit_hist.sum(axis = 1), A00c_hit_hist.sum(axis = 1), vtd_hit_hist.sum(axis = 1), thermo_hit_hist.sum(axis = 1), photo_hit_hist.sum(axis = 1)]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile0 = pd.DataFrame(([ORN_hit_hist[:, 0], AN_hit_hist[:, 0], MN_hit_hist[:, 0], A00c_hit_hist[:, 0], vtd_hit_hist[:, 0], thermo_hit_hist[:, 0], photo_hit_hist[:, 0]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile1 = pd.DataFrame(([ORN_hit_hist[:, 1], AN_hit_hist[:, 1], MN_hit_hist[:, 1], A00c_hit_hist[:, 1], vtd_hit_hist[:, 1], thermo_hit_hist[:, 1], photo_hit_hist[:, 1]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile2 = pd.DataFrame(([ORN_hit_hist[:, 2], AN_hit_hist[:, 2], MN_hit_hist[:, 
2], A00c_hit_hist[:, 2], vtd_hit_hist[:, 2], thermo_hit_hist[:, 2], photo_hit_hist[:, 2]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile3 = pd.DataFrame(([ORN_hit_hist[:, 3], AN_hit_hist[:, 3], MN_hit_hist[:, 3], A00c_hit_hist[:, 3], vtd_hit_hist[:, 3], thermo_hit_hist[:, 3], photo_hit_hist[:, 3]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile4 = pd.DataFrame(([ORN_hit_hist[:, 4], AN_hit_hist[:, 4], MN_hit_hist[:, 4], A00c_hit_hist[:, 4], vtd_hit_hist[:, 4], thermo_hit_hist[:, 4], photo_hit_hist[:, 4]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile5 = pd.DataFrame(([ORN_hit_hist[:, 5], AN_hit_hist[:, 5], MN_hit_hist[:, 5], A00c_hit_hist[:, 5], vtd_hit_hist[:, 5], thermo_hit_hist[:, 5], photo_hit_hist[:, 5]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile6 = pd.DataFrame(([ORN_hit_hist[:, 6], AN_hit_hist[:, 6], MN_hit_hist[:, 6], A00c_hit_hist[:, 6], vtd_hit_hist[:, 6], thermo_hit_hist[:, 6], photo_hit_hist[:, 6]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile7 = pd.DataFrame(([ORN_hit_hist[:, 7], AN_hit_hist[:, 7], MN_hit_hist[:, 7], A00c_hit_hist[:, 7], vtd_hit_hist[:, 7], thermo_hit_hist[:, 7], photo_hit_hist[:, 7]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile8 = pd.DataFrame(([ORN_hit_hist[:, 8], AN_hit_hist[:, 8], MN_hit_hist[:, 8], A00c_hit_hist[:, 8], vtd_hit_hist[:, 8], thermo_hit_hist[:, 8], photo_hit_hist[:, 8]]), index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']) sensory_profile = sensory_profile.T sensory_profile0 = sensory_profile0.T sensory_profile1 = sensory_profile1.T sensory_profile2 = sensory_profile2.T sensory_profile3 = sensory_profile3.T sensory_profile4 = sensory_profile4.T sensory_profile5 = sensory_profile5.T sensory_profile6 = sensory_profile6.T sensory_profile7 = sensory_profile7.T sensory_profile8 = sensory_profile8.T #%% # multisensory elements per layer (apples to apples) threshold = 25 ORN0_indices = np.where(ORN_hit_hist[:, 0]>threshold)[0] ORN1_indices = np.where(ORN_hit_hist[:, 1]>threshold)[0] ORN2_indices = np.where(ORN_hit_hist[:, 2]>threshold)[0] ORN3_indices = np.where(ORN_hit_hist[:, 3]>threshold)[0] ORN4_indices = np.where(ORN_hit_hist[:, 4]>threshold)[0] ORN5_indices = np.where(ORN_hit_hist[:, 5]>threshold)[0] ORN6_indices = np.where(ORN_hit_hist[:, 6]>threshold)[0] ORN7_indices = np.where(ORN_hit_hist[:, 7]>threshold)[0] ORN8_indices = np.where(ORN_hit_hist[:, 8]>threshold)[0] AN0_indices = np.where(AN_hit_hist[:, 0]>threshold)[0] AN1_indices = np.where(AN_hit_hist[:, 1]>threshold)[0] AN2_indices = np.where(AN_hit_hist[:, 2]>threshold)[0] AN3_indices = np.where(AN_hit_hist[:, 3]>threshold)[0] AN4_indices = np.where(AN_hit_hist[:, 4]>threshold)[0] AN5_indices = np.where(AN_hit_hist[:, 5]>threshold)[0] AN6_indices = np.where(AN_hit_hist[:, 6]>threshold)[0] AN7_indices = np.where(AN_hit_hist[:, 7]>threshold)[0] AN8_indices = np.where(AN_hit_hist[:, 8]>threshold)[0] MN0_indices = np.where(MN_hit_hist[:, 0]>threshold)[0] MN1_indices = np.where(MN_hit_hist[:, 1]>threshold)[0] MN2_indices = np.where(MN_hit_hist[:, 2]>threshold)[0] MN3_indices = np.where(MN_hit_hist[:, 3]>threshold)[0] MN4_indices = np.where(MN_hit_hist[:, 4]>threshold)[0] MN5_indices = np.where(MN_hit_hist[:, 5]>threshold)[0] MN6_indices = np.where(MN_hit_hist[:, 6]>threshold)[0] MN7_indices = np.where(MN_hit_hist[:, 7]>threshold)[0] MN8_indices = np.where(MN_hit_hist[:, 
8]>threshold)[0] A00c0_indices = np.where(A00c_hit_hist[:, 0]>threshold)[0] A00c1_indices = np.where(A00c_hit_hist[:, 1]>threshold)[0] A00c2_indices = np.where(A00c_hit_hist[:, 2]>threshold)[0] A00c3_indices = np.where(A00c_hit_hist[:, 3]>threshold)[0] A00c4_indices = np.where(A00c_hit_hist[:, 4]>threshold)[0] A00c5_indices = np.where(A00c_hit_hist[:, 5]>threshold)[0] A00c6_indices = np.where(A00c_hit_hist[:, 6]>threshold)[0] A00c7_indices = np.where(A00c_hit_hist[:, 7]>threshold)[0] A00c8_indices = np.where(A00c_hit_hist[:, 8]>threshold)[0] vtd0_indices = np.where(vtd_hit_hist[:, 0]>threshold)[0] vtd1_indices = np.where(vtd_hit_hist[:, 1]>threshold)[0] vtd2_indices = np.where(vtd_hit_hist[:, 2]>threshold)[0] vtd3_indices = np.where(vtd_hit_hist[:, 3]>threshold)[0] vtd4_indices = np.where(vtd_hit_hist[:, 4]>threshold)[0] vtd5_indices = np.where(vtd_hit_hist[:, 5]>threshold)[0] vtd6_indices =
np.where(vtd_hit_hist[:, 6]>threshold)[0]
vtd7_indices = np.where(vtd_hit_hist[:, 7]>threshold)[0]
vtd8_indices = np.where(vtd_hit_hist[:, 8]>threshold)[0]
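# Sketch of how index arrays like the ones above can be combined (illustrative
# only; this deprecated block depends on hit_hist arrays defined elsewhere).
# Neurons passing threshold for two modalities at the same hop are their intersection:
orn2_demo = np.array([3, 7, 12, 40])  # hypothetical ORN hop-2 indices
an2_demo = np.array([7, 12, 55])      # hypothetical AN hop-2 indices
print(np.intersect1d(orn2_demo, an2_demo))  # -> [ 7 12]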
import numpy as np import krikos.nn.utils as utils class Layer(object): def __init__(self): super(Layer, self).__init__() self.params = {} self.cache = {} self.grads = {} def forward(self, input): raise NotImplementedError def backward(self, dout): raise NotImplementedError class Linear(Layer): def __init__(self, input_dim, output_dim): super(Linear, self).__init__() self.params["W"] = np.random.randn(input_dim, output_dim) * 0.01 self.params["b"] = np.zeros(output_dim) def forward(self, input): output = np.matmul(input, self.params["W"]) + self.params["b"] self.cache["input"] = input return output def backward(self, dout): input = self.cache["input"] self.grads["W"] = np.matmul(input.T, dout) self.grads["b"] = np.sum(dout, axis=0) dout = np.matmul(dout, self.params["W"].T) return dout class Convolutional(Layer): def __init__(self, channels, num_filters, kernel_size, stride=1, pad=0): super(Convolutional, self).__init__() self.params["W"] = np.random.randn(num_filters, channels, kernel_size, kernel_size) * 0.01 self.params["b"] = np.zeros(num_filters) self.stride = stride self.pad = pad self.F = num_filters self.HH, self.WW = kernel_size, kernel_size def forward(self, input): N, C, H, W = input.shape F, HH, WW = self.F, self.HH, self.WW stride, pad = self.stride, self.pad H_prime = 1 + (H + 2 * pad - HH) / stride W_prime = 1 + (W + 2 * pad - WW) / stride assert H_prime.is_integer() and W_prime.is_integer(), 'Invalid filter dimension' H_prime, W_prime = int(H_prime), int(W_prime) out = np.zeros((N, F, H_prime, W_prime)) filters = self.params["W"].reshape(F, C * HH * WW) x_pad = np.pad(input, pad_width=((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=0) for i in range(H_prime): h_start = i * stride h_end = h_start + HH for j in range(W_prime): w_start = j * stride w_end = w_start + WW kernel = x_pad[:, :, h_start:h_end, w_start:w_end] kernel = kernel.reshape(N, C * HH * WW) conv = np.matmul(kernel, filters.T) + self.params["b"] out[:, :, i, j] = conv self.cache["input"] = input return out def backward(self, dout): input = self.cache["input"] stride, pad = self.stride, self.pad N, C, H, W = input.shape F, HH, WW = self.F, self.HH, self.WW _, _, H_prime, W_prime = dout.shape H_pad, W_pad = H + 2 * pad, W + 2 * pad dx = np.zeros((N, C, H_pad, W_pad)) dw = np.zeros_like(self.params["W"]) db = np.sum(dout, axis=(0, 2, 3)) filters = self.params["W"].reshape(F, C * HH * WW) x_pad = np.pad(input, pad_width=((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=0) for i in range(H_prime): h_start = i * stride h_end = h_start + HH for j in range(W_prime): w_start = j * stride w_end = w_start + WW piece = dout[:, :, i, j] x_piece = x_pad[:, :, h_start:h_end, w_start:w_end].reshape(N, C * HH * WW) dx_piece = np.matmul(piece, filters) dw_piece = np.matmul(piece.T, x_piece) dx[:, :, h_start:h_end, w_start:w_end] += dx_piece.reshape(N, C, HH, WW) dw += dw_piece.reshape(F, C, HH, WW) dx = dx[:, :, pad:H_pad - pad, pad:W_pad - pad] self.grads["W"], self.grads["b"] = dw, db return dx class MaxPooling(Layer): def __init__(self, kernel_size, stride=1, pad=0): super(MaxPooling, self).__init__() self.stride = stride self.pad = pad self.HH, self.WW = kernel_size, kernel_size def forward(self, input): N, C, H, W = input.shape HH, WW, stride = self.HH, self.WW, self.stride H_prime = (H - HH) / stride + 1 W_prime = (W - WW) / stride + 1 out = np.zeros((N, C, H_prime, W_prime)) if not H_prime.is_integer() or not W_prime.is_integer(): raise Exception('Invalid filter 
dimension') H_prime, W_prime = int(H_prime), int(W_prime) for i in range(H_prime): h_start = i * stride h_end = h_start + HH for j in range(W_prime): w_start = j * stride w_end = w_start + WW kernel = input[:, :, h_start:h_end, w_start:w_end] kernel = kernel.reshape(N, C, HH * WW) max = np.max(kernel, axis=2) out[:, :, i, j] = max self.cache['input'] = input return out def backward(self, dout): input = self.cache['input'] N, C, H, W = input.shape HH, WW, stride = self.HH, self.WW, self.stride H_prime = int((H - HH) / stride + 1) W_prime = int((W - WW) / stride + 1) dx = np.zeros_like(input) for i in range(H_prime): h_start = i * stride h_end = h_start + HH for j in range(W_prime): w_start = j * stride w_end = w_start + WW max = dout[:, :, i, j] kernel = input[:, :, h_start:h_end, w_start:w_end] kernel = kernel.reshape(N, C, HH * WW) indeces = np.argmax(kernel, axis=2) grads = np.zeros_like(kernel) for n in range(N): for c in range(C): grads[n, c, indeces[n, c]] = max[n, c] dx[:, :, h_start:h_end, w_start:w_end] += grads.reshape(N, C, HH, WW) return dx class Flatten(Layer): def __init__(self): super(Flatten, self).__init__() def forward(self, input): self.cache["shape"] = input.shape return input.reshape(input.shape[0], -1) def backward(self, dout): return dout.reshape(self.cache["shape"]) # ACTIVATIONS class ReLU(Layer): def __init__(self): super(ReLU, self).__init__() def forward(self, input): mask = input >= 0 self.cache["mask"] = mask input[~mask] = 0 return input def backward(self, dout): mask = self.cache["mask"] dout = dout * mask return dout # REGULARIZATION class BatchNorm(Layer): def __init__(self, dim, epsilon=1e-5, momentum=0.9): super(BatchNorm, self).__init__() self.params['gamma'] = np.ones(dim) self.params['beta'] = np.zeros(dim) self.running_mean, self.running_var = np.zeros(dim), np.zeros(dim) self.epsilon, self.momentum = epsilon, momentum self.mode = "train" def forward(self, input): gamma, beta = self.params['gamma'], self.params['beta'] running_mean, running_var = self.running_mean, self.running_var epsilon, momentum = self.epsilon, self.momentum if self.mode == 'train': mean, var = np.mean(input, axis=0), np.var(input, axis=0) norm = (input - mean) / np.sqrt(var + epsilon) output = gamma * norm + beta running_mean = momentum * running_mean + (1 - momentum) * mean running_var = momentum * running_var + (1 - momentum) * var self.running_mean, self.running_var = running_mean, running_var self.cache['input'], self.cache['norm'], self.cache['mean'], self.cache['var'] = input, norm, mean, var else: norm = (input - running_mean) / np.sqrt(running_var) output = gamma * norm + beta return output def backward(self, dout): input, norm, mean, var = self.cache['input'], self.cache['norm'], self.cache['mean'], self.cache['var'] gamma, beta = self.params['gamma'], self.params['beta'] epsilon = self.epsilon N, _ = dout.shape self.grads['beta'] = np.sum(dout, axis=0) self.grads['gamma'] = np.sum(dout * norm, axis=0) dshift1 = 1 / (np.sqrt(var + epsilon)) * dout * gamma dshift2 = np.sum((input - mean) * dout * gamma, axis=0) dshift2 = (-1 / (var + epsilon)) * dshift2 dshift2 = (0.5 /
np.sqrt(var + epsilon)) * dshift2  # dshift2 is now d(var)
        # the source is truncated here; the remaining steps follow the standard
        # batch-norm gradient, combining d(var), d(mean), and the direct path
        dmean = -np.sum(dshift1, axis=0) - (2.0 / N) * dshift2 * np.sum(input - mean, axis=0)
        dx = dshift1 + dshift2 * 2.0 * (input - mean) / N + dmean / N
        return dx
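# Minimal finite-difference check of Linear.backward (a sketch; assumes the
# Linear class defined above). With loss = sum(forward(x) * dout), the analytic
# gradients are exactly what backward(dout) stores in layer.grads.
def _numeric_grad(f, x, eps=1e-6):
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f()
        x[idx] = orig - eps
        f_minus = f()
        x[idx] = orig  # restore the perturbed parameter
        grad[idx] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return grad

if __name__ == "__main__":
    layer = Linear(4, 3)
    x = np.random.randn(5, 4)
    dout = np.random.randn(5, 3)
    layer.forward(x)
    layer.backward(dout)
    loss = lambda: np.sum(layer.forward(x) * dout)
    assert np.allclose(_numeric_grad(loss, layer.params["W"]), layer.grads["W"], atol=1e-5)
    assert np.allclose(_numeric_grad(loss, layer.params["b"]), layer.grads["b"], atol=1e-5)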
# Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import typing from functools import partial import numpy as np from nnef_tools.io.caffe2 import caffe2_pb from nnef_tools.io.caffe2.caffe2_graph import * from nnef_tools.core import utils, graph_utils from nnef_tools.shape_inference import shape_inference as infer ShapeResult = typing.Union[ typing.Tuple[typing.List[int], str], typing.Tuple[typing.Tuple[typing.List[int], ...], typing.Tuple[str, ...]] ] DTYPE_FLOAT = 'FLOAT' DTYPE_BOOL = 'BOOL' DTYPE_INT32 = 'INT32' DTYPE_INT64 = 'INT64' DTYPE_UINT8 = 'UINT8' def caffe2_pads_to_nnef_padding(pads): assert len(pads) % 2 == 0 return list(zip(pads[:len(pads) // 2], pads[len(pads) // 2:])) def nnef_padding_to_caffe2_pads(padding): ps, qs = utils.zip_inverse(2, padding) return ps + qs def flatten_to_2d(shape, axis): return [utils.product(shape[:axis]), utils.product(shape[axis:])] # Shape inference def one_element_0d_shape(op, dtype=None): # type: (Caffe2Operation, str)->ShapeResult return [], dtype if dtype is not None else op.inputs[0].dtype def one_element_1d_shape(op, dtype=None): # type: (Caffe2Operation, str)->ShapeResult return [1], dtype if dtype is not None else op.inputs[0].dtype def first_input_shape(op, n=1, dtype=None): # type: (Caffe2Operation, typing.Union[int, str], str)->ShapeResult if n == 'auto': n = len(op.outputs) assert n == len(op.outputs) if n == 1: return op.inputs[0].shape, op.inputs[0].dtype if dtype is None else dtype else: return tuple(i.shape for i in op.inputs[:n]), tuple(i.dtype if dtype is None else dtype for i in op.inputs[:n]) def shape_shape(op): # type: (Caffe2Operation)->ShapeResult graph_utils.replace_tensor_in_consumers(op.graph, op.output, Caffe2Tensor(graph=op.graph, shape=[op.input.rank], data=np.array(op.input.shape, dtype=np.int64), dtype=DTYPE_INT64), remove=False) return [op.input.rank], DTYPE_INT64 def prepend_dim_shape(op): # type: (Caffe2Operation)->ShapeResult shape = op.input.shape dim_size = op.attribs['dim_size'] assert shape[0] % dim_size == 0 return [dim_size] + [shape[0] // dim_size] + shape[1:], op.input.dtype def arg_min_max_shape(op, dtype=None): # type: (Caffe2Operation, typing.Optional[str])->ShapeResult axis = op.attribs.get('axis', -1) keep_dims = op.attribs.get('keepdims', 1) return infer.reduce(op.inputs[0].shape, axes=[axis], squeeze=not keep_dims), \ op.inputs[0].dtype if dtype is None else dtype def reduce_shape(op, dtype=None): # type: (Caffe2Operation, typing.Optional[str])->ShapeResult axes = op.attribs['axes'] keep_dims = op.attribs['keepdims'] return infer.reduce(op.inputs[0].shape, axes=axes, squeeze=not keep_dims), \ op.inputs[0].dtype if dtype is None else dtype def no_output_shape(op): # type: (Caffe2Operation)->ShapeResult return tuple(), tuple() def conv_shape(op): # type: (Caffe2Operation)->ShapeResult is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' return infer.conv(input=op.inputs[0].shape, 
filter=op.inputs[1].shape[1:-1] if is_nhwc else op.inputs[1].shape[2:], padding=caffe2_pads_to_nnef_padding(op.attribs['pads']), stride=op.attribs['strides'], dilation=op.attribs['dilations'], groups=op.attribs['group'], format=(infer.Format.NHWC if is_nhwc else infer.Format.NCHW), output_channels=op.inputs[1].shape[0]), op.inputs[0].dtype def conv_transpose_shape(op): # type: (Caffe2Operation)->ShapeResult is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' return infer.conv(input=op.inputs[0].shape, filter=op.inputs[1].shape[1:-1] if is_nhwc else op.inputs[1].shape[2:], padding=caffe2_pads_to_nnef_padding(op.attribs['pads']), stride=op.attribs['strides'], dilation=[1] * (op.inputs[0].rank - 2), groups=op.attribs['group'], format=(infer.Format.NHWC if is_nhwc else infer.Format.NCHW), output_channels=op.inputs[1].shape[-1 if is_nhwc else 1] * op.attribs['group'], output_padding=[(0, a) for a in op.attribs['adjs']], deconv=True), op.inputs[0].dtype def pool_shape(op): # type: (Caffe2Operation)->ShapeResult is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' if op.attribs['global_pooling']: return infer.reduce( op.inputs[0].shape, axes=list(range(1, op.inputs[0].rank - 1)) if is_nhwc else list(range(2, op.inputs[0].rank)), squeeze=False), op.inputs[0].dtype def expand(list, default): if is_nhwc: return [default] + list + [default] else: return [default, default] + list return infer.sliding_window(input=op.inputs[0].shape, filter=expand(op.attribs['kernels'], 1), padding=expand(caffe2_pads_to_nnef_padding(op.attribs['pads']), (0, 0)), stride=expand(op.attribs['strides'], 1), dilation=expand(op.attribs['dilations'], 1)), op.inputs[0].dtype def max_pool_with_index_shape(op): # type: (Caffe2Operation)->ShapeResult shape, dtype = pool_shape(op) return (shape, shape), (dtype, DTYPE_INT32) def lrn_shape(op): # type: (Caffe2Operation)->ShapeResult return (op.inputs[0].shape,) * len(op.outputs), (op.inputs[0].dtype,) * len(op.outputs) def concat_shape(op): # type: (Caffe2Operation)->ShapeResult if op.attribs['add_axis']: output_shape = infer.stack([input.shape for input in op.inputs], axis=op.attribs['axis']) else: output_shape = infer.concat([input.shape for input in op.inputs], axis=op.attribs['axis']) graph_utils.replace_tensor_in_consumers( op.graph, op.outputs[1], Caffe2Tensor(graph=op.graph, shape=[len(op.inputs)], data=np.array([input.shape[op.attribs['axis']] for input in op.inputs], dtype=np.int32), dtype=DTYPE_INT32), remove=False) return (output_shape, [len(op.inputs)]), (op.inputs[0].dtype, DTYPE_INT32) def dropout_shape(op): # type: (Caffe2Operation)->ShapeResult if not op.attribs.get('is_test', 0): raise utils.NNEFToolsException("Dropout: only is_test=1 is supported.") return (op.inputs[0].shape,) * len(op.outputs), (op.inputs[0].dtype,) * len(op.outputs) def bbox_transform_shape(op): # type: (Caffe2Operation)->ShapeResult rois, deltas, im_info = op.inputs M, fourK = deltas.shape N, _ = im_info.shape if len(op.outputs) == 1: return [M, fourK], op.inputs[0].dtype elif len(op.outputs) == 2: return ([M, fourK], [N]), (op.inputs[0].dtype, op.inputs[0].dtype) else: assert False def batch_matmul_shape(op): # type: (Caffe2Operation)->ShapeResult A, B = op.inputs A_shape = A.shape B_shape = B.shape if op.attribs.get('trans_a'): A_shape = A_shape[:-2] + list(reversed(A_shape[-2:])) if op.attribs.get('trans_b'): B_shape = B_shape[:-2] + list(reversed(B_shape[-2:])) if len(A_shape) == 1: A_shape = [None, A_shape[0]] if len(B_shape) == 1: B_shape = [B_shape[0], None] rank = 
max(len(A_shape), len(B_shape)) A_shape = [1] * (rank - len(A_shape)) + A_shape B_shape = [1] * (rank - len(B_shape)) + B_shape assert all(a == b or a == 1 or b == 1 for a, b in zip(A_shape[:-2], B_shape[:-2])) assert A_shape[-1] == B_shape[-2] shape = utils.without_none([max(a, b) for a, b in zip(A_shape[:-2], B_shape[:-2])] + [A_shape[-2], B_shape[-1]]) if not shape: shape = [1] return shape, op.inputs[0].dtype def fc_shape(op, transposed=False): # type: (Caffe2Operation, bool)->ShapeResult X, W, b = op.inputs axis = op.attribs.get('axis', 1) axis_w = op.attribs.get('axis_w', 1) if not transposed: shape = X.shape[:axis] + [utils.product(W.shape[:axis_w])] else: shape = X.shape[:axis] + [utils.product(W.shape[axis_w:])] return shape, op.inputs[0].dtype def matmul_shape(op): # type: (Caffe2Operation)->ShapeResult assert len(op.inputs) == 2 A, B = op.inputs axis_a = op.attribs.get('axis_a', 1) axis_b = op.attribs.get('axis_b', 1) trans_a = op.attribs.get('trans_a', 0) trans_b = op.attribs.get('trans_b', 0) return infer.matmul(flatten_to_2d(A.shape, axis_a), flatten_to_2d(B.shape, axis_b), transpose_a=trans_a, transpose_b=trans_b), \ op.inputs[0].dtype def brg_nchw_c_to_packed_int8_bgra_stylizer_deprocess_shape(op): # type: (Caffe2Operation)->ShapeResult N, C, H, W = op.inputs[0].shape return [N, H, W, 4], DTYPE_UINT8 def packed_int8_bgra_nhwc_to_nchw_c_stylizer_preprocess_shape(op): # type: (Caffe2Operation)->ShapeResult N, H, W, C = op.inputs[0].shape return [N, 3, H, W], DTYPE_FLOAT def cast_shape(op): # type: (Caffe2Operation)->ShapeResult dest_type_id = op.attribs['to'] return op.inputs[0].shape, caffe2_pb.dtype_id_to_name(dest_type_id) def conditional_shape(op): # type: (Caffe2Operation)->ShapeResult cond, true_value, false_value = op.inputs return true_value.shape, true_value.dtype def split_shape(op): # type: (Caffe2Operation)->ShapeResult if len(op.inputs) == 1: sizes = op.attribs['split'] elif len(op.inputs) == 2: if op.inputs[1].data is None: raise utils.NNEFToolsException('Split is not supported with calculated sizes.') sizes = op.inputs[1].data.tolist() op.attribs['split'] = sizes else: assert False op.inputs = (op.inputs[0],) output_shapes = tuple(infer.split(input=op.inputs[0].shape, axis=op.attribs['axis'], sizes=sizes)) return output_shapes, (op.inputs[0].dtype,) * len(output_shapes) def reshape_shape(op): # type: (Caffe2Operation)->ShapeResult if len(op.inputs) == 1: shape = op.attribs['shape'] elif len(op.inputs) == 2: if op.inputs[1].data is None: raise utils.NNEFToolsException('Reshape is not supported with calculated shape.') shape = op.inputs[1].data.tolist() else: assert False graph_utils.replace_tensor_in_consumers(op.graph, op.outputs[1], Caffe2Tensor(graph=op.graph, shape=[op.inputs[0].rank], data=np.array(op.inputs[0].shape, dtype=np.int64), dtype=DTYPE_INT64), remove=False) op.attribs['shape'] = shape op.inputs = (op.inputs[0],) return (infer.reshape(op.inputs[0].shape, shape=shape, zero_means_same=True), [op.inputs[0].rank]), \ (op.inputs[0].dtype, DTYPE_INT64) def resize_like_shape(op): # type: (Caffe2Operation)->ShapeResult return op.inputs[1].shape, op.inputs[0].dtype def squeeze_shape(op): # type: (Caffe2Operation)->ShapeResult return infer.squeeze(op.inputs[0].shape, axes=op.attribs['dims']), op.inputs[0].dtype def only_batch_shape(op): # type: (Caffe2Operation)->ShapeResult return [op.inputs[0].shape[0]], op.inputs[0].dtype def dot_product_with_padding_shape(op): # type: (Caffe2Operation)->ShapeResult return [max(op.inputs[0].shape[0], 
op.inputs[1].shape[0])], op.inputs[0].dtype def expand_dims_shape(op): # type: (Caffe2Operation)->ShapeResult return infer.unsqueeze(op.inputs[0].shape, axes=op.attribs['dims']), op.inputs[0].dtype def flatten_shape(op): # type: (Caffe2Operation)->ShapeResult axis = op.attribs.get('axis', 1) return flatten_to_2d(op.inputs[0].shape, axis), op.inputs[0].dtype def flatten_to_vec_shape(op): # type: (Caffe2Operation)->ShapeResult return [utils.product(op.inputs[0].shape)], op.inputs[0].dtype def generate_proposals_shape(op): # type: (Caffe2Operation)->ShapeResult n = 1 # not precise return ([n, 5], [n]), (op.inputs[0].dtype, op.inputs[0].dtype) def glu_shape(op): # type: (Caffe2Operation)->ShapeResult shape = list(op.input.shape) shape[-1] //= 2 return shape, op.input.dtype def instance_norm_shape(op): # type: (Caffe2Operation)->ShapeResult if len(op.outputs) == 1: return op.inputs[0].shape, op.inputs[0].dtype else: N = op.inputs[0].shape[0] C = op.inputs[1].shape[0] return (op.inputs[0].shape, [N, C], [N, C]), (op.inputs[0].dtype,) * 3 def box_with_nms_limit_shape(op): # type: (Caffe2Operation)->ShapeResult _count, num_classes = op.inputs[0].shape n = 1 # not precise shapes = ([n], [n, 4], [n], [n], [n], [num_classes]) dtypes = (DTYPE_FLOAT, DTYPE_FLOAT, DTYPE_FLOAT, DTYPE_FLOAT, DTYPE_INT32, DTYPE_INT32) return shapes[:len(op.outputs)], dtypes[:len(op.outputs)] def layer_norm_shape(op): # type: (Caffe2Operation)->ShapeResult axis = op.attribs.get('axis', 1) return (op.inputs[0].shape, op.inputs[0].shape[:axis] + [1], op.inputs[0].shape[:axis] + [1]), \ (op.inputs[0].dtype, op.inputs[0].dtype, op.inputs[0].dtype) def merge_dim_shape(op): # type: (Caffe2Operation)->ShapeResult if op.inputs[0].rank >= 2: return [op.inputs[0].shape[0] * op.inputs[0].shape[1]] + op.inputs[0].shape[2:], op.inputs[0].dtype else: return op.inputs[0].shape, op.inputs[0].dtype def pad_image_shape(op): # type: (Caffe2Operation)->ShapeResult is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' def expand(list, default): if is_nhwc: return [default] + list + [default] else: return [default, default] + list return infer.pad(input=op.inputs[0].shape, padding=expand(caffe2_pads_to_nnef_padding(op.attribs['pads']), (0, 0))), op.inputs[0].dtype def quant_decode_shape(op): # type: (Caffe2Operation)->ShapeResult if len(op.inputs) == 2: return op.inputs[1].shape, op.inputs[1].dtype elif len(op.inputs) >= 3: return tuple(i.shape for i in op.inputs[1:]), tuple(i.dtype for i in op.inputs[1:]) else: assert False def resize_nearest_shape(op): # type: (Caffe2Operation)->ShapeResult assert op.inputs[0].rank == 4 width_scale = op.attribs.get('width_scale', 1.0) height_scale = op.attribs.get('height_scale', 1.0) is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' shape = op.inputs[0].shape if is_nhwc: return [shape[0], int(shape[1] * height_scale), int(shape[2] * width_scale), shape[3]], op.inputs[0].dtype else: return [shape[0], shape[1], int(shape[2] * height_scale), int(shape[3] * width_scale)], op.inputs[0].dtype def roi_align_shape(op): # type: (Caffe2Operation)->ShapeResult is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' x, rois = op.inputs R, _4or5 = rois.shape pooled_h = op.attribs.get('pooled_h', 1) pooled_w = op.attribs.get('pooled_w', 1) if is_nhwc: N, H, W, C = x.shape return [R, pooled_h, pooled_w, C], x.dtype else: N, C, H, W = x.shape return [R, C, pooled_h, pooled_w], x.dtype def roi_pool_shape(op): # type: (Caffe2Operation)->ShapeResult if not op.attribs.get('is_test', 0): raise 
utils.NNEFToolsException("RoIPool: only is_test=1 is supported.") is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' x, rois = op.inputs num_rois, _5 = rois.shape pooled_h = op.attribs.get('pooled_h', 1) pooled_w = op.attribs.get('pooled_w', 1) if is_nhwc: N, H, W, C = x.shape shape = [num_rois, pooled_h, pooled_w, C] else: N, C, H, W = x.shape shape = [num_rois, C, pooled_h, pooled_w] if len(op.outputs) == 1: return shape, x.dtype else: return (shape, shape), (x.dtype, DTYPE_INT32) def size_shape(op): # type: (Caffe2Operation)->ShapeResult graph_utils.replace_tensor_in_consumers(op.graph, op.outputs[0], Caffe2Tensor(graph=op.graph, shape=[], data=np.array(op.inputs[0].count, dtype=np.int64), dtype=DTYPE_INT64), remove=False) return one_element_0d_shape(op) def slice_shape(op): # type: (Caffe2Operation)->ShapeResult # Currently, only slicing in a single dimension is supported in Caffe2 if len(op.inputs) == 1: starts = op.attribs['starts'] ends = op.attribs['ends'] elif len(op.inputs) == 3: if op.inputs[1].data is None: raise utils.NNEFToolsException('Slice is not supported with calculated sizes.') if op.inputs[2].data is None: raise utils.NNEFToolsException('Slice is not supported with calculated sizes.') starts = op.inputs[1].data.tolist() ends = op.inputs[2].data.tolist() else: assert False op.attribs = { 'starts': starts, 'ends': ends, } op.inputs = (op.inputs[0],) return infer.slice(op.inputs[0].shape, begin=starts, end=[e + 1 if e < 0 else e for e in ends], zero_means_all=True), op.input.dtype def spatial_bn_shape(op): # type: (Caffe2Operation)->ShapeResult if not op.attribs.get('is_test', 0): raise utils.NNEFToolsException("SpatialBN: only is_test=1 is supported.") is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC' c = op.inputs[0].shape[-1] if is_nhwc else op.inputs[0].shape[1] if len(op.outputs) == 1: return op.inputs[0].shape, op.inputs[0].dtype assert len(op.outputs) == 5 return (op.inputs[0].shape, [c], [c], [c], [c]), (op.inputs[0].dtype,) * 5 def range_shape(op): # type: (Caffe2Operation)->ShapeResult if len(op.inputs) == 1: start = 0 if op.inputs[0].data is None: raise utils.NNEFToolsException('Range is not supported with calculated sizes.') stop = op.inputs[0].data.tolist() step = 1 elif len(op.inputs) == 2: if op.inputs[0].data is None: raise utils.NNEFToolsException('Range is not supported with calculated sizes.') start = op.inputs[0].data.tolist() if op.inputs[1].data is None: raise utils.NNEFToolsException('Range is not supported with calculated sizes.') stop = op.inputs[1].data.tolist() step = 1 elif len(op.inputs) == 3: if op.inputs[0].data is None: raise utils.NNEFToolsException('Range is not supported with calculated sizes.') start = op.inputs[0].data.tolist() if op.inputs[1].data is None: raise utils.NNEFToolsException('Range is not supported with calculated sizes.') stop = op.inputs[1].data.tolist() if op.inputs[2].data is None: raise utils.NNEFToolsException('Range is not supported with calculated sizes.') step = op.inputs[2].data.tolist() else: assert False return [len(
np.arange(start, stop, step))], op.inputs[0].dtype  # dtype assumed to follow the first input, as in the other shape functions here
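# Round-trip sketch of the padding helpers defined at the top of this module.
# Caffe2 stores pads as [begin..., end...]; NNEF uses per-axis (begin, end) pairs.
# The round-trip print assumes utils.zip_inverse unzips pairs back into lists.
if __name__ == "__main__":
    pads = [1, 2, 0, 1, 2, 0]                    # three spatial dims, begins then ends
    padding = caffe2_pads_to_nnef_padding(pads)
    assert padding == [(1, 1), (2, 2), (0, 0)]
    print(nnef_padding_to_caffe2_pads(padding))  # expected: [1, 2, 0, 1, 2, 0]
    # flatten_to_2d collapses a shape around an axis, as the fc/matmul shape functions do
    assert flatten_to_2d([2, 3, 4, 5], 2) == [6, 20]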
import numpy as np import pandas as pd import math import matplotlib from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt import scipy.spatial as ss import seaborn from ..logging import info from ..read_export import load_anndata def bandwidth_nrd(x): x = pd.Series(x) h = (x.quantile([0.75]).values - x.quantile([0.25]).values) / 1.34 return 4 * 1.06 * min(math.sqrt(np.var(x, ddof=1)), h) * (len(x) ** (-1 / 5)) def rep(x, length): len_x = len(x) n = int(length / len_x) r = length % len_x re = [] for i in range(0, n): re = re + x for i in range(0, r): re = re + [x[i]] return re # https://stackoverflow.com/questions/46166933/python-numpy-equivalent-of-r-rep-and-rep-len-functions?rq=1 # def rep2(x, length): # x = np.array(x) # res = np.repeat(x, length, axis=0) # return res def rep2(x, length_out): return np.tile(x, length_out // len(x) + 1)[:length_out] def dnorm(x, u=0, sig=1): return np.exp(-(x - u) ** 2 / (2 * sig ** 2)) / (math.sqrt(2 * math.pi) * sig) def kde2d(x, y, h=None, n=25, lims=None): """Reproduce kde2d function behavior from MASS package in R. Two-dimensional kernel density estimation with an axis-aligned bivariate normal kernel, evaluated on a square grid. Arguments --------- x: `List` x coordinate of data y: `List` y coordinate of data h: `List` (Default: None) vector of bandwidths for :math:`x` and :math:`y` directions. Defaults to normal reference bandwidth (see `bandwidth.nrd`). A scalar value will be taken to apply to both directions. n: `int` (Default: 25) Number of grid points in each direction. Can be scalar or a length-2 integer list. lims: `List` (Default: None) The limits of the rectangle covered by the grid as :math:`x_l, x_u, y_l, y_u`. Returns ------- A list of three components gx, gy: `List` The x and y coordinates of the grid points, lists of length `n`. z: `List` An :math:`n[1]` by :math:`n[2]` matrix of the estimated density: rows correspond to the value of :math:`x`, columns to the value of :math:`y`. """ nx = len(x) if not lims: lims = [min(x), max(x), min(y), max(y)] if (len(y) != nx): raise Exception("data vectors must be the same length") elif ((False in np.isfinite(x)) or (False in np.isfinite(y))): raise Exception("missing or infinite values in the data are not allowed") elif (False in np.isfinite(lims)): raise Exception("only finite values are allowed in 'lims'") else: n = rep(n, length=2) if isinstance(n, list) else rep([n], length=2) gx = np.linspace(lims[0], lims[1], n[0]) gy = np.linspace(lims[2], lims[3], n[1]) if h is None: h = [bandwidth_nrd(x), bandwidth_nrd(y)] else: h = np.array(rep(h, length=2)) if h[0] <= 0 or h[1] <= 0: raise Exception("bandwidths must be strictly positive") else: h /= 4 ax = pd.DataFrame() ay = pd.DataFrame() for i in range(len(x)): ax[i] = (gx - x[i]) / h[0] for i in range(len(y)): ay[i] = (gy - y[i]) / h[1] z = (np.matrix(dnorm(ax)) * np.matrix(dnorm(ay).T)) / (nx * h[0] * h[1]) return gx, gy, z # understand the login information and use that for verbose def viz_response(adata, pairs_mat, log=False, delay=1, k=5, grid_num=25, n_row=None, n_col=1, scales="free", return_data = False, verbose=False): """Plot the lagged DREVI plot pairs of genes across pseudotime. This plotting function builds on the original idea of DREVI plot but is extended in the context for causal network. It considers the time delay between the hypothetical regulators to the target genes which is parametered by :math:`d`. 
Lagged DREVI plot first estimates the joint density (:math:`P(x_{t - d}, y_t)`) for variables :math:`x_{t - d} and y_t`, then it divides the joint density by the marginal density :math:`P(x_{t - d})` to get the conditional density estimate (:math:`P(x_{t - d}, y_t | x_{x - d})`). We then calculate the z-score normalizing each column of conditional density. Note that this plot tries to demonstrate the potential influence between two variables instead of the factual influence. A red line corresponding to the point with maximal density on each :math:`x` value is plot which indicates the maximal possible point for :math:`y_t` give the value of :math:`x_{t - d}`. The 2-d density is estimated through the kde2d function. Arguments --------- adata: `Anndata` Annotated Data Frame, an Anndata object. pairs_mat: 'np.ndarray' A matrix where each row is the gene pair and the first column is the hypothetical source or regulator while the second column represents the hypothetical target. The name in this matrix should match the name in the gene_short_name column of the adata object. log: `bool` (Default: False) A logic argument used to determine whether or not you should perform log transformation (using :math:`log(expression + 1)`) before calculating density estimates, default to be TRUE. delay: `int` (Default: 1) The time delay between the source and target gene. k: `int` (Default: 5) Number of k-nearest neighbors used in calculating 2-D kernel density grid_num: `int` (Default: 25) The number of grid when creating the lagged DREVI plot. n_row: `int` (Default: None) number of columns used to layout the faceted cluster panels. n_col: `int` (Default: 1) number of columns used to layout the faceted cluster panels. scales: `str` (Default: 'free') The character string passed to facet function, determines whether or not the scale is fixed or free in different dimensions. (not used) verbose: A logic argument to determine whether or not we should print the detailed running information. Returns ------- In addition to figure created by matplotlib, it also returns: flat_res: 'pd.core.frame.DataFrame' a pandas data frame used to create the heatmap with four columns (`x`: x-coordinate; `y`: y-coordinate; `den`: estimated density at x/y coordinate; `type`: the corresponding gene pair). flat_res_subset: 'pd.core.frame.DataFrame' a pandas data frame used to create the heatmap for the last gene pair (if multiple gene-pairs are inputted) with four columns (`x`: x-coordinate; `y`: y-coordinate; `den`: estimated density at x/y coordinate; `type`: the corresponding gene pair). ridge_curve_subset: 'pd.core.frame.DataFrame' a pandas data frame used to create the read ridge line for the last gene pair (if multiple gene-pairs are inputted) with four columns (`x`: x-coordinate; `y`: y-coordinate; `type`: the corresponding gene pair). """ model = load_anndata(adata) data = model.X # pd.DataFrame(model.expression.values,index = adata.var_names) all_genes_in_pair = np.unique(pairs_mat) if (not (set(all_genes_in_pair) <= set(data.index.values))): raise Exception( "adata doesn't include all genes in gene_pairs_mat. 
Make sure all genes are included in gene_short_name column of the obs property of adata.") sub_data = data.loc[all_genes_in_pair, :] if grid_num == None: dim_val = (round((len(sub_data) - delay) / 4)) else: dim_val = grid_num flat_res = pd.DataFrame(columns=["x", "y", "den", "type"]) ###empty df ridge_curve = pd.DataFrame(columns=["x", "y", "type"]) xy = pd.DataFrame() id = 0 for gene_pairs_ind in range(len(pairs_mat)): if verbose: info("current gene pair is ", pairs_mat[gene_pairs_ind, 0], " -> ", pairs_mat[gene_pairs_ind, 1]) ############ gene_pairs = pairs_mat[gene_pairs_ind, :] f_ini_ind = (dim_val ** 2) * id - 1 r_ini_ind = dim_val * id - 1 gene_pair_name = gene_pairs[0] + '->' + gene_pairs[1] x = [i for i in sub_data.loc[gene_pairs[0]].dropna()] y_ori = [i for i in sub_data.loc[gene_pairs[1]].dropna()] if log: x, y_ori = np.log(np.array(x) + 1), np.log(np.array(y_ori) + 1) if delay != 0: x = x[:-delay] y = y_ori[delay:] z = y_ori[delay - 1:-1] else: y = y_ori z = y_ori # add LaTex equation in matlibplot bandwidth = [bandwidth_nrd(x), bandwidth_nrd(y)] if 0 in bandwidth: max_vec = [max(x), max(y)] bandwidth[bandwidth == 0] = max_vec[bandwidth == 0] / dim_val x_meshgrid, y_meshgrid, den_res = kde2d(x, y, n=[dim_val, dim_val], lims=[min(x), max(x), min(y), max(y)], h=bandwidth) den_res = np.array(den_res) den_x = np.sum(den_res, axis=0) max_ind = 1 den_res = den_res.tolist() for i in range(len(x_meshgrid)): tmp = den_res[i] / den_x[i] max_val = max(tmp) min_val = 0 if np.sum(den_x[i] != 0): rescaled_val = (den_res[i] / den_x[i] - min_val) / (max_val - min_val) max_ind = np.argmax(rescaled_val) res_Row = pd.DataFrame([[x_meshgrid[i], y_meshgrid[max_ind], gene_pair_name]], columns=["x", "y", "type"], index=[r_ini_ind + i]) ridge_curve = pd.concat([ridge_curve, res_Row]) for j in range(len(y_meshgrid)): rescaled_val = (den_res[i][j] / den_x[i] - min_val) / (max_val - min_val) res_Row = pd.DataFrame([[x_meshgrid[i], y_meshgrid[j], rescaled_val, gene_pair_name]], columns=["x", "y", "den", "type"], index=[i * len(x_meshgrid) + j + f_ini_ind]) flat_res = pd.concat([flat_res, res_Row]) cur_data = pd.DataFrame({'x': x, 'y': y, 'type': gene_pair_name}) xy = pd.concat([xy, cur_data], axis=0) id = id + 1 gene_pairs_num = flat_res.type.value_counts().shape[0] n_row = gene_pairs_num if n_row is None else n_row if n_row * n_col < gene_pairs_num: raise Exception("The number of row or column specified is less than the gene pairs") fig, axes = plt.subplots(n_row, n_col, figsize=(8, 8), sharex=False, sharey=False, squeeze=False) plt.xlabel(r'$x_{t-1}$') plt.ylabel(r'$y_{t}$') i, j = 0, 0 for x, flat_res_type in enumerate(flat_res.type.value_counts().index.values): flat_res_subset = flat_res[flat_res['type'] == flat_res_type] ridge_curve_subset = ridge_curve[ridge_curve['type'] == flat_res_type] xy_subset = xy[xy['type'] == flat_res_type] x_val, y_val = flat_res_subset['x'], flat_res_subset['y'] i, j = x % n_row, x // n_row # %: remainder; //: integer division im = axes[i, j].imshow(flat_res_subset['den'].values.reshape(dim_val, dim_val).T, interpolation='mitchell', origin='lower', extent=(min(x_val), max(x_val), min(y_val), max(y_val)), cmap=matplotlib.colors.LinearSegmentedColormap.from_list('my_map', ['#000000', '#000000', '#000000', '#800080', '#FF0000', '#FFFF00'])) axes[i, j].title.set_text(flat_res_type) # ridge_curve_subset = pd.DataFrame(flat_res_subset).loc[pd.DataFrame(flat_res_subset).groupby('x')['den'].idxmax()] axes[i, j].plot(ridge_curve_subset['x'].values, ridge_curve_subset['y'].values, 
color='red') # axes[i, j].plot(flat_res_subset['x'], [0.01]*len(flat_res_subset['x']), '|', color='white') # axes[i, j].plot([0.01]*len(flat_res_subset['y']), flat_res_subset['y'], '|', color='white') seaborn.rugplot(xy_subset['x'].values, height=0.05, axis='x', ax=axes[i, j], c="darkred", alpha=0.25) seaborn.rugplot(xy_subset['y'].values, height=0.025, axis='y', ax=axes[i, j], c="darkred", alpha=0.25) fig.colorbar(im, ax=axes) plt.show() if return_data: return (flat_res, flat_res_subset, ridge_curve_subset) def viz_causality(adata, pairs_mat, log=False, delay=1, k=5, grid_num=25, n_row=None, n_col=1, scales="free", return_data = False, verbose=False): """Plot the heatmap for the expected value :math:`y(t)` given :math:`x(t - d)` and :math:`y(t - 1)`. This plotting function tries to intuitively visualize the informatioin transfer from :math:`x(t - d)` to :math:`y(t)` given :math:`y(t)`'s previous state :math:`y(t - 1)`. Firstly, we divide the expression space for :math:`x(t - d)` to :math:`y(t - 1)` based on grid_num and then we estimate the k-nearest neighbor for each of the grid. We then use a Gaussian kernel to estimate the expected value for :math:`y(t)`. It is then displayed in two dimension with :math:`x(t - d)` and :math:`y(t - 1)` as two axis and the color represents the expected value of :math:`y(t)` give :math:`x(t - d)` and :math:`y(t - 1)`. This function accepts a matrix where each row is the gene pair and the first column is the hypothetical source or regulator while the second column represents the hypothetical target. The name in this matrix should match the name in the gene_short_name column of the cds_subset object. Arguments --------- adata: `Anndata` Annotated Data Frame, an Anndata object. pairs_mat: 'np.ndarray' A matrix where each row is the gene pair and the first column is the hypothetical source or regulator while the second column represents the hypothetical target. The name in this matrix should match the name in the gene_short_name column of the adata object. log: `bool` (Default: False) A logic argument used to determine whether or not you should perform log transformation (using log(expression + 1)) before calculating density estimates, default to be TRUE. delay: `int` (Default: 1) The time delay between the source and target gene. k: `int` (Default: 5) Number of k-nearest neighbors used in calculating 2-D kernel density grid_num: `int` (Default: 25) The number of grid when creating the lagged DREVI plot. n_row: `int` (Default: None) number of columns used to layout the faceted cluster panels. n_col: `int` (Default: 1) number of columns used to layout the faceted cluster panels. scales: `str` (Default: 'free') The character string passed to facet function, determines whether or not the scale is fixed or free in different dimensions. (not used) verbose: A logic argument to determine whether or not we should print the detailed running information. Returns ------- A figure created by matplotlib. """ model = load_anndata(adata) data = model.X all_genes_in_pair =
np.unique(pairs_mat)
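# Standalone usage sketch for kde2d on synthetic data (illustrative; separate
# from the truncated viz_causality above). Bandwidths are passed explicitly,
# mirroring how viz_response calls kde2d.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x_demo = rng.normal(size=200)
    y_demo = 0.8 * x_demo + 0.2 * rng.normal(size=200)
    bw = [bandwidth_nrd(x_demo), bandwidth_nrd(y_demo)]
    gx, gy, z = kde2d(x_demo.tolist(), y_demo.tolist(), h=bw, n=25)
    z = np.array(z)
    assert z.shape == (25, 25)       # rows follow x, columns follow y (see docstring)
    marginal_x = z.sum(axis=1)       # marginal over y at each x grid point, the
                                     # quantity a conditional (DREVI-style) estimate divides by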
import argparse
import logging
import sys
from pathlib import Path
import numpy as np
from typing import Any, Dict, Iterable, List, Sequence, Tuple, Union
import json
import os
from collections import OrderedDict, defaultdict
import copy
import pandas as pd
from sklearn.metrics import recall_score


def softmax(x: np.ndarray):
    if x.ndim == 1:
        x = x.reshape((1, -1))
    max_x = np.max(x, axis=1).reshape((-1, 1))
    exp_x = np.exp(x - max_x)
    return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))


class EPIC_KITCHENS100_Evaluator():
    def __init__(self, dataset, root='./data/EK100'):
        self.action_composition = dataset.action_composition
        self.vn2action = {(v, n): i for i, (v, n) in enumerate(self.action_composition)}
        self.verb_to_action_set = defaultdict(list)
        self.noun_to_action_set = defaultdict(list)
        for i, (v, n) in enumerate(self.action_composition):
            self.verb_to_action_set[v].append(i)
            self.noun_to_action_set[n].append(i)
        self.num_action = dataset.num_action
        self.num_verb = dataset.num_verb
        self.num_noun = dataset.num_noun
        self.tail_class_verbs = pd.read_csv(
            os.path.join(root, "EPIC_100_tail_verbs.csv"),
            index_col="verb"
        ).index.values.tolist()
        self.tail_class_nouns = pd.read_csv(
            os.path.join(root, "EPIC_100_tail_nouns.csv"),
            index_col="noun"
        ).index.values.tolist()
        self.tail_class_action = []
        for i, (v, n) in enumerate(self.action_composition):
            if v in self.tail_class_verbs or n in self.tail_class_nouns:
                self.tail_class_action.append(i)
        self.unseen_participant_ids = pd.read_csv(
            os.path.join(root, "EPIC_100_unseen_participant_ids_validation.csv"),
            index_col="participant_id",
        ).index.values.tolist()

    def compute_recall(self, rank, gt_class, classes, topk=5):
        if len(classes) == 0:
            return 0
        is_topk_correct = np.any(rank[:, :topk] == np.expand_dims(gt_class, 1), axis=1)
        recall = recall_score(
            y_true=gt_class,
            y_pred=np.where(is_topk_correct, gt_class, rank[:, 0]),
            labels=classes,
            average=None
        )
        return np.nanmean(recall)

    def intersection(self, classes, labels):
        return np.intersect1d(classes, np.unique(labels))

    def __call__(self, prediction, gt):
        if 'action' in prediction and 'verb' in prediction and 'noun' in prediction:
            score_verb = prediction['verb']
            score_noun = prediction['noun']
            score_action = prediction['action']
        elif 'action' not in prediction:
            score_verb = prediction['verb']
            score_noun = prediction['noun']
            prob_verb = softmax(score_verb)
            prob_noun = softmax(score_noun)
            score_action = np.zeros((prob_verb.shape[0], self.num_action))
            for i, (v, n) in enumerate(self.action_composition):
                score_action[:, i] = prob_verb[:, v] * prob_noun[:, n]
        elif 'verb' not in prediction and 'noun' not in prediction:
            score_action = prediction['action']
            score_noun = np.zeros((score_action.shape[0], self.num_noun))
            score_verb = np.zeros((score_action.shape[0], self.num_verb))
            for i in range(self.num_noun):
                if i not in self.noun_to_action_set:
                    score_noun[:, i] = 0
                else:
                    score_noun[:, i] = score_action[:, self.noun_to_action_set[i]].max(1)
            for i in range(self.num_verb):
                if i not in self.verb_to_action_set:
                    score_verb[:, i] = 0
                else:
                    score_verb[:, i] = score_action[:, self.verb_to_action_set[i]].max(1)
        else:
            raise NotImplementedError
        dirty = False
        if dirty:
            score_verb = copy.deepcopy(score_verb)
            score_noun = copy.deepcopy(score_noun)
            score_action = copy.deepcopy(score_action)
            score_verb[:, ~np.in1d(np.arange(self.num_verb), np.unique(gt['verb']))] = -np.inf
            score_noun[:, ~np.in1d(np.arange(self.num_noun), np.unique(gt['noun']))] = -np.inf
            score_action[:, ~np.in1d(np.arange(self.num_action), np.unique(gt['action']))] = -np.inf
        rank_verb = np.argsort(score_verb, axis=1)[:, ::-1]
        rank_noun = np.argsort(score_noun, axis=1)[:, ::-1]
        rank_action =
np.argsort(score_action,axis=1)
numpy.argsort
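A minimal sketch of the ranking idiom completed above: numpy.argsort sorts ascending, so reversing the columns yields highest-score-first class ranks (the scores here are hypothetical).

import numpy as np

scores = np.array([[0.1, 0.7, 0.2]])
rank = np.argsort(scores, axis=1)[:, ::-1]  # rank[:, 0] is the top-1 class
print(rank)  # [[1 2 0]]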
import bisect import numpy as np ######################################## # Algorithms ######################################## # Compute indices of slice of sorted data which fit into the provided range def slice_sorted(data, rng): return [ bisect.bisect_left(data, rng[0]), bisect.bisect_right(data, rng[1])] # Take a list of strings, return a unique list of strings of the same length # Non-unique strings will be appended their index at the end # It is guaranteed that index increments with position in the list def string_list_pad_unique(data1D, suffix=''): d = {} rez = [] for elem in data1D: if elem not in d.keys(): d[elem] = 0 rez += [elem] else: d[elem] += 1 rez += [elem + suffix + str(d[elem])] return rez ######################################## # Permutation operations ######################################## # Finds permutation map A->B of elements of two arrays, which are permutations of each other def perm_map_arr(a, b): return np.where(b.reshape(b.size, 1) == a)[1] # Same as perm_map_arr, but for string characters def perm_map_str(a, b): return perm_map_arr(np.array(list(a)), np.array(list(b))) ######################################## # Set operations ######################################## # Returns a list only containing unique items # Unlike Set(), order of unique items from the original list is preserved def unique_ordered(lst): return list(dict.fromkeys(lst)) # Returns set subtraction of s1 - s2, preserving order of s1 def unique_subtract(s1, s2): rez = [s for s in s1 if s not in s2] if type(s1) == list: return rez elif type(s1) == str: return "".join(rez) elif type(s1) == tuple: return tuple(rez) else: raise ValueError("Unexpected Type", type(s1)) ######################################## # Non-uniform dimension array lists ######################################## # Test if a given dimension is part of a dimension order def assert_get_dim_idx(dimOrd, trgDim, label="TASK_NAME", canonical=False): if trgDim in dimOrd: return dimOrd.index(trgDim) else: if canonical: dimNameDict = { "p": "processes (aka channels)", "s": "samples (aka times)", "r": "repetitions (aka trials)" } raise ValueError(label, "requires", dimNameDict[trgDim], "dimension; have", dimOrd) else: raise ValueError(label, "not found", trgDim, "in", dimOrd) ######################################## # Non-uniform dimension array lists ######################################## def set_list_shapes(lst, axis=None): if axis is None: return list(set([elem.shape for elem in lst])) else: return list(set([elem.shape[axis] for elem in lst])) def list_assert_get_uniform_shape(lst, axis=None): if len(lst) == 0: raise ValueError("Got empty list") shapes = set_list_shapes(lst, axis) if len(shapes) > 1: raise ValueError("Expected uniform shapes for axis", axis, "; got", shapes) return next(iter(shapes)) ######################################## # Multivariate dimension operations ######################################## # Transpose data dimensions given permutation of axis labels # If augment option is on, then extra axis of length 1 are added when missing def numpy_transpose_byorder(data, orderSrc, orderTrg, augment=False): if data.ndim != len(orderSrc): raise ValueError("Incompatible data", data.shape, "and order", orderSrc) if not augment: if sorted(orderSrc) != sorted(orderTrg): raise ValueError('Cannot transform', orderSrc, "to", orderTrg) return data.transpose(perm_map_str(orderSrc, orderTrg)) else: if not set(orderSrc).issubset(set(orderTrg)): raise ValueError('Cannot augment', orderSrc, "to", orderTrg) nIncr = len(orderTrg) - 
len(orderSrc) newShape = data.shape + tuple([1]*nIncr) newOrder = orderSrc + unique_subtract(orderTrg, orderSrc) return data.reshape(newShape).transpose(perm_map_str(newOrder, orderTrg)) # Return original shape, but replace all axis that have been reduced with 1s # So final shape looks as if it is of the same dimension as original # Useful for broadcasting reduced arrays onto original arrays def numpy_shape_reduced_axes(shapeOrig, reducedAxis): if reducedAxis is None: # All axes have been reduced return tuple([1]*len(shapeOrig)) else: if not isinstance(reducedAxis, tuple): reducedAxis = (reducedAxis,) shapeNew = list(shapeOrig) for idx in reducedAxis: shapeNew[idx] = 1 return tuple(shapeNew) # Add extra dimensions of size 1 to array at given locations def numpy_add_empty_axes(x, axes): newShape = list(x.shape) for axis in axes: newShape.insert(axis, 1) return x.reshape(tuple(newShape)) # Reshape array by merging all dimensions between l and r def numpy_merge_dimensions(data, l, r): shOrig = list(data.shape) shNew = tuple(shOrig[:l] + [np.prod(shOrig[l:r])] + shOrig[r:]) return data.reshape(shNew) # Move a dimension from one place to another # Example1: [0,1,2,3,4,5], 3, 1 -> [0,3,1,2,4,5] # Example2: [0,1,2,3,4,5], 1, 3 -> [0,2,3,1,4,5] def numpy_move_dimension(data, axisOld, axisNew): ord = list(
np.arange(data.ndim)
numpy.arange
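A minimal sketch of the axis-permutation idiom that numpy.arange feeds in numpy_move_dimension above (the shapes are hypothetical).

import numpy as np

data = np.zeros((2, 3, 4))
order = list(np.arange(data.ndim))  # [0, 1, 2]
order.insert(1, order.pop(2))       # move old axis 2 to position 1
print(data.transpose(order).shape)  # (2, 4, 3)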
from __future__ import division
import numpy as np
import soundfile as sf
from scipy.signal import spectrogram
import scipy.stats
from . import timbral_util


def timbral_depth(fname, fs=0, dev_output=False, phase_correction=False, clip_output=False,
                  threshold_db=-60, low_frequency_limit=20, centroid_crossover_frequency=2000,
                  ratio_crossover_frequency=500, db_decay_threshold=-40):
    """
    This function calculates the apparent Depth of an audio file.

    This version of timbral_depth contains self loudness normalising methods and can accept
    arrays as an input instead of a string filename.

    Version 0.4

    Required parameter
    :param fname:                           string or numpy array
                                            string, audio filename to be analysed, including full file path and extension.
                                            numpy array, array of audio samples, requires fs to be set to the sample rate.

    Optional parameters
    :param fs:                              int/float, when fname is a numpy array, this is required to be the sample rate.
                                            Defaults to 0.
    :param phase_correction:                bool, perform phase checking before summing to mono. Defaults to False.
    :param dev_output:                      bool, when False return the depth, when True return all extracted features.
                                            Defaults to False.
    :param threshold_db:                    float/int (negative), threshold, in dB, for calculating centroids.
                                            Should be negative. Defaults to -60.
    :param low_frequency_limit:             float/int, low frequency limit at which to highpass filter the audio, in Hz.
                                            Defaults to 20.
    :param centroid_crossover_frequency:    float/int, crossover frequency for calculating the spectral centroid, in Hz.
                                            Defaults to 2000.
    :param ratio_crossover_frequency:       float/int, crossover frequency for calculating the ratio, in Hz.
                                            Defaults to 500.
    :param db_decay_threshold:              float/int (negative), threshold, in dB, for estimating duration.
                                            Should be negative. Defaults to -40.

    :return:                                float, apparent depth of audio file.

    Copyright 2018 <NAME>, Institute of Sound Recording, University of Surrey, UK.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
""" ''' Read input ''' audio_samples, fs = timbral_util.file_read(fname, fs, phase_correction=phase_correction) ''' Filter audio ''' # highpass audio - run 3 times to get -18dB per octave - unstable filters produced when using a 6th order audio_samples = timbral_util.filter_audio_highpass(audio_samples, crossover=low_frequency_limit, fs=fs) audio_samples = timbral_util.filter_audio_highpass(audio_samples, crossover=low_frequency_limit, fs=fs) audio_samples = timbral_util.filter_audio_highpass(audio_samples, crossover=low_frequency_limit, fs=fs) # running 3 times to get -18dB per octave rolloff, greater than second order filters are unstable in python lowpass_centroid_audio_samples = timbral_util.filter_audio_lowpass(audio_samples,crossover=centroid_crossover_frequency,fs=fs) lowpass_centroid_audio_samples = timbral_util.filter_audio_lowpass(lowpass_centroid_audio_samples,crossover=centroid_crossover_frequency,fs=fs) lowpass_centroid_audio_samples = timbral_util.filter_audio_lowpass(lowpass_centroid_audio_samples,crossover=centroid_crossover_frequency,fs=fs) lowpass_ratio_audio_samples = timbral_util.filter_audio_lowpass(audio_samples,crossover=ratio_crossover_frequency,fs=fs) lowpass_ratio_audio_samples = timbral_util.filter_audio_lowpass(lowpass_ratio_audio_samples,crossover=ratio_crossover_frequency,fs=fs) lowpass_ratio_audio_samples = timbral_util.filter_audio_lowpass(lowpass_ratio_audio_samples,crossover=ratio_crossover_frequency,fs=fs) ''' Get spectrograms and normalise ''' # normalise audio lowpass_ratio_audio_samples *= (1.0 / max(abs(audio_samples))) lowpass_centroid_audio_samples *= (1.0 / max(abs(audio_samples))) audio_samples *= (1.0 / max(abs(audio_samples))) # set FFT parameters nfft = 4096 hop_size = int(3 * nfft / 4) # get spectrogram if len(audio_samples) > nfft: freq, time, spec = spectrogram(audio_samples, fs, 'hamming', nfft, hop_size, nfft, 'constant', True, 'spectrum') lp_centroid_freq, lp_centroid_time, lp_centroid_spec = spectrogram(lowpass_centroid_audio_samples, fs, 'hamming', nfft, hop_size, nfft, 'constant', True, 'spectrum') lp_ratio_freq, lp_ratio_time, lp_ratio_spec = spectrogram(lowpass_ratio_audio_samples, fs, 'hamming', nfft, hop_size, nfft, 'constant', True, 'spectrum') else: # file is shorter than 4096, just take the fft freq, time, spec = spectrogram(audio_samples, fs, 'hamming', len(audio_samples), len(audio_samples)-1, nfft, 'constant', True, 'spectrum') lp_centroid_freq, lp_centroid_time, lp_centroid_spec = spectrogram(lowpass_centroid_audio_samples, fs, 'hamming', len(lowpass_centroid_audio_samples), len(lowpass_centroid_audio_samples)-1, nfft, 'constant', True, 'spectrum') lp_ratio_freq, lp_ratio_time, lp_ratio_spec = spectrogram(lowpass_ratio_audio_samples, fs, 'hamming', len(lowpass_ratio_audio_samples), len(lowpass_ratio_audio_samples)-1, nfft, 'constant', True, 'spectrum') threshold = timbral_util.db2mag(threshold_db) ''' METRIC 1 - limited weighted mean normalised lower centroid ''' # define arrays for storing metrics all_normalised_lower_centroid = [] all_normalised_centroid_tpower = [] # get metrics for each time segment of the spectrogram for idx in range(len(time)): # get overall spectrum of time frame current_spectrum = spec[:, idx] # calculate time window power tpower = np.sum(current_spectrum) all_normalised_centroid_tpower.append(tpower) # estimate if time segment contains audio energy or just noise if tpower > threshold: # get the spectrum lower_spectrum = lp_centroid_spec[:, idx] lower_power = np.sum(lower_spectrum) # get lower 
centroid lower_centroid = np.sum(lower_spectrum * lp_centroid_freq) / float(lower_power) # append to list all_normalised_lower_centroid.append(lower_centroid) else: all_normalised_lower_centroid.append(0) # calculate the weighted mean of lower centroids weighted_mean_normalised_lower_centroid = np.average(all_normalised_lower_centroid, weights=all_normalised_centroid_tpower) # limit to the centroid crossover frequency if weighted_mean_normalised_lower_centroid > centroid_crossover_frequency: limited_weighted_mean_normalised_lower_centroid = np.float64(centroid_crossover_frequency) else: limited_weighted_mean_normalised_lower_centroid = weighted_mean_normalised_lower_centroid ''' METRIC 2 - weighted mean normalised lower ratio ''' # define arrays for storing metrics all_normalised_lower_ratio = [] all_normalised_ratio_tpower = [] # get metrics for each time segment of the spectrogram for idx in range(len(time)): # get time frame of broadband spectrum current_spectrum = spec[:, idx] tpower =
np.sum(current_spectrum)
numpy.sum
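A minimal sketch of the frame-power computation completed above: numpy.sum over one spectrogram column estimates that time frame's power (the array here is hypothetical).

import numpy as np

spec = np.abs(np.random.randn(2049, 10))  # hypothetical magnitude spectrogram
tpower = np.sum(spec[:, 0])               # power of the first time frame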
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
from astropy.io import ascii
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle

# Some font setting
text_width = 523.5307/72
column_width = 256.0748/72

matplotlib.rc("text", usetex=True)
matplotlib.rc("text.latex", preamble=r"""
\usepackage{txfonts}
\newcommand{\mathdefault}[1][]{}""")
matplotlib.rc("font", family="Times", size=10)

# set up the figure grid: PEE panels, a gap and then the PBB panels
gridspec = dict(hspace=0.0, wspace=0.0, width_ratios=[1, 1, 0.2, 1, 1])
fig, axes = plt.subplots(nrows=5, ncols=5, gridspec_kw=gridspec,
                         figsize=(text_width, text_width*0.75))

# Read in user input to set the patch, blind, zmin, zmax, nbootstrap
#if len(sys.argv) < 2:
#    print("Usage: %s LFVER BLIND e.g 2Dbins_v2_goldclasses_Flag_SOM_Fid A" % sys.argv[0])
#    sys.exit(1)
#else:
#    LFVER = sys.argv[1]  # catalogue version identifier
#    BLIND = sys.argv[2]  # blind
LFVER = "2Dbins_v2_goldclasses_Flag_SOM_Fid"
BLIND = "C"

# number of tomographic bins, and band power modes to plot
ntomobin = 5
nlensbin = 2
ntomocomb = 15
nmodes = 8

# before we read in the per tomo bin combination data, we need to read in
# the full covariance from the mocks
# These are 3x2pt covs, even though they are stored in the Pkk_cov directory
Bcovdat = '../Pkk/Pkk_cov/thps_cov_kids1000_mar30_bandpower_B_apod_0_matrix.dat'
Bcov =
np.loadtxt(Bcovdat)
numpy.loadtxt
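A minimal sketch of the completed numpy.loadtxt call, assuming a whitespace-delimited covariance file exists at the (hypothetical) path below.

import numpy as np

Bcov = np.loadtxt('thps_cov_bandpower_B_matrix.dat')  # hypothetical path
print(Bcov.shape)  # 2D array: one row per line, one column per field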
# Copyright (C) 2017 TU Dresden # Licensed under the ISC license (see LICENSE.txt) # # Author: <NAME> from __future__ import print_function import numpy as np import numba as nb import cvxpy as cvx import math import random from os.path import exists import json # import fjlt.fjlt as fjlt #TODO: use fjlt to (automatically) lower the dimension of embedding from mocasin.representations import metric_spaces as metric from mocasin.util import logging import mocasin.util.random_distributions.lp as lp log = logging.getLogger(__name__) # An embedding \iota: M \hookrightarrow R^k # will be calculated and realized as a lookup-table. # This does not scale for large metric spaces as well. # # However: the idea is to do this for the small space M # and then handle the case M^d \hat \iota R^{kd} specially. # from: https://jeremykun.com/2016/02/08/big-dimensions-and-what-you-can-do-about-it/0/ def randomSubspace(subspaceDimension, ambientDimension): return np.random.normal(0, 1, size=(subspaceDimension, ambientDimension)) def project(v, subspace): subspaceDimension = len(subspace) return (1 / math.sqrt(subspaceDimension)) * subspace.dot(v) def jlt(data, subspaceDimension): ambientDimension = len(data[0]) A = randomSubspace(subspaceDimension, ambientDimension) return (1 / math.sqrt(subspaceDimension)) * A.dot(data.T).T def jlt_search(D, E, target_dist, num_tries=30): dim = 2 dim_orig = len(E[0]) found = False while not found and dim < dim_orig: log.info(f"jlt search: increasing dimension to {dim}") for _ in range(num_tries): candidate = jlt(E, dim) cur_distortion = check_distortion(D, candidate) log.debug(f"jlt search: found distortion of {cur_distortion}") if cur_distortion < target_dist: found = True break dim = dim * 2 if found: return np.array(candidate), cur_distortion else: return E, check_distortion(D, E) def check_distortion(D, E): distortion = 1 it = np.nditer(np.array(D), flags=["multi_index"]) for dist in it: x, y = it.multi_index distance_vecs = np.linalg.norm(E[x] - E[y]) if dist != 0 and distance_vecs != 0: distort = np.abs(distance_vecs / dist) distort = max(distort, 1 / distort) elif distance_vecs != 0: # dist = 0 distort = 1 + np.abs(distance_vecs) else: # both 0 distort = 0 if distort > distortion: distortion = distort return distortion # To whomever someday has the misfortune of having to mantain this code: # I'm sorry. These functions are confusing. I'll try my best to explain them. # The basic idea here is speeding up the approximation of a vector in the # representation to the closest vector representing an actual mapping. # It's split in two functions that we compile with the numba JIT, # the base case (from the MetricSpaceEmbeddingBase class) and the full # one. The base case just takes a vector and a range, as well as the # lookup matrix iota. The range represents the indices we care about in # the vector, since we split the vector in two parts, one for the PEs # and one for the channels. We basically take the vector with the least # distance to the one we want to approximate and that's our approximation. # @nb.njit(fastmath=True, cache=True) def _f_base_approx(vec, rg, iota): min = np.inf idx = -1 for i in range(rg[0], rg[1]): distsq = 0 for j in range(vec.shape[0]): distsq += (iota[i, j] - vec[j]) ** 2 # we don't need to take the square root, # since we just care about the minimizing index if distsq < min: min = distsq idx = i return iota[idx] # For the general case we do the splitting into a mapping of proceses to PEs # and a mapping of channels to primitives. 
That's why we have the two values, # split_k and split_d. The value k is for the number of PEs and primitives, # and split_k tells us where the PEs end and the primitives start. The d # value, on the other hand, represents the number of processes+channels, # and split_d accordingly tells us where the processes end and the channels # start @nb.njit(fastmath=True, parallel=True, cache=True) def _f_emb_approx(vec, d, k, split_d, split_k, iota, n): res = np.empty((d, k)) for i in nb.prange(d): comp =
np.empty(k)
numpy.empty
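A minimal sketch of the completed numpy.empty call: it allocates an uninitialised buffer, which is safe here because every slot is written before being read (k is hypothetical).

import numpy as np

k = 4
comp = np.empty(k)           # contents are garbage until assigned
for j in range(k):
    comp[j] = float(j) ** 2  # overwrite every entry before use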
# script import requests from bs4 import BeautifulSoup import pandas as pd import numpy as np #import wquantiles as weighted # suppress the SettingWithCopyWarning pd.options.mode.chained_assignment = None # nfl model code class NFLModelBase: # initialize def __init__(self): pass # scrape schedule def scrape_nfl_schedule(self, int_year=2019): # get url r = requests.get(f'https://www.pro-football-reference.com/years/{int_year}/games.htm') # get content of page soup = BeautifulSoup(r.content, 'html.parser') # get all table rows table_rows = soup.find_all('tr') # instantiate empty list list_dict_row = [] # for each row for a, row in enumerate(table_rows): # instantiate empty dictionary dict_row = {} # get td elements td = row.find_all('td') # may need logic in case td is empty list # get week str_week = row.find('th').text # try converting to int try: int_week = int(str_week) # if week is not able to be converted to int except: # logic if str_week == 'WildCard': int_week = 18 elif str_week == 'Division': int_week = 19 elif str_week == 'ConfChamp': int_week = 20 elif str_week == 'SuperBowl': int_week = 21 else: # skip iteration continue # save to dict_row dict_row['week'] = int_week # get winning team str_winning_team = td[3].find('a').text dict_row['winning_team'] = str_winning_team # get game location str_game_loc = td[4].text dict_row['game_loc'] = str_game_loc # get losing team str_losing_team = td[5].find('a').text dict_row['losing_team'] = str_losing_team # get winning team points int_winning_points = int(td[7].text) dict_row['winning_team_points'] = int_winning_points # get losing team points int_losing_team_points = int(td[8].text) dict_row['losing_team_points'] = int_losing_team_points # append row to list_dict_row list_dict_row.append(dict_row) # put into df df_schedule_results = pd.DataFrame(list_dict_row) # save to object self.df_schedule_results = df_schedule_results self.int_year = int_year # return self return self # prepare data def prepare_data(self): # empty list list_dict_row = [] # iterate through rows for row in self.df_schedule_results.iterrows(): # empty dict dict_row = {} # get series ser_row = row[1] # get game loc str_game_loc = ser_row['game_loc'] # logic if str_game_loc == '@': str_home_team = ser_row['losing_team'] str_away_team = ser_row['winning_team'] int_home_points = ser_row['losing_team_points'] int_away_points = ser_row['winning_team_points'] elif str_game_loc == '': str_home_team = ser_row['winning_team'] str_away_team = ser_row['losing_team'] int_home_points = ser_row['winning_team_points'] int_away_points = ser_row['losing_team_points'] elif str_game_loc == 'N': # super bowl continue # put into dict dict_row['week'] = ser_row['week'] dict_row['home_team'] = str_home_team dict_row['away_team'] = str_away_team dict_row['home_score'] = int_home_points dict_row['away_score'] = int_away_points # append list_dict_row.append(dict_row) # put into df df_prepped_data = pd.DataFrame(list_dict_row) # helper function to get winning team def get_winner(home_team, away_team, home_score, away_score): if home_score > away_score: return home_team elif home_score < away_score: return away_team else: return 'tie' # apply function df_prepped_data['winning_team'] = df_prepped_data.apply(lambda x: get_winner(home_team=x['home_team'], away_team=x['away_team'], home_score=x['home_score'], away_score=x['away_score']), axis=1) # drop unplayed games df_prepped_data = df_prepped_data.dropna(subset=['home_score']) # make col for year df_prepped_data['year'] = self.int_year # save to 
object self.df_prepped_data = df_prepped_data # return return self # define helper function (could get errors here) @staticmethod def win_pct_helper(list_teams_unique, df_prepped_data, int_year): # empty list list_dict_row = [] # iterate through teams for team in list_teams_unique: # empty dict dict_row = {} # subset to where home team or away team == team df_subset = df_prepped_data[(df_prepped_data['home_team'] == team) | (df_prepped_data['away_team'] == team)] # see how many times team is in winning_team int_n_wins = list(df_subset['winning_team']).count(team) # get number of games int_n_games = df_subset.shape[0] # get win pct flt_win_pct = int_n_wins / int_n_games # if we have zero win pct make it .01 if flt_win_pct == 0: flt_win_pct = 0.01 # put into dict_row dict_row['team'] = team dict_row['win_pct'] = flt_win_pct # append to list list_dict_row.append(dict_row) # put into df df_win_pct = pd.DataFrame(list_dict_row) # make col for year df_win_pct['year'] = int_year # return return df_win_pct # get winning oct for year of home team def get_winning_pct_home(self, int_year_home=2019): # subset to int_year_home df_prepped_data_year = self.df_prepped_data[self.df_prepped_data['year']==int_year_home] # get all teams for int_year_home list_all_teams_year = list(df_prepped_data_year['home_team']) + list(df_prepped_data_year['away_team']) # rm dups list_teams_unique = list(dict.fromkeys(list_all_teams_year)) # use helper df_win_pct_home = self.win_pct_helper(list_teams_unique=list_teams_unique, df_prepped_data=df_prepped_data_year, int_year=int_year_home) # save to object self.int_year_home = int_year_home self.df_win_pct_home = df_win_pct_home # return return self # get winning pct for each team for weighting later def get_winning_pct_away(self, int_year_away=2019): # subset to int_year_away df_prepped_data_year = self.df_prepped_data[self.df_prepped_data['year']==int_year_away] # get all teams for int_year_away list_all_teams_year = list(df_prepped_data_year['home_team']) + list(df_prepped_data_year['away_team']) # rm dups list_teams_unique = list(dict.fromkeys(list_all_teams_year)) # use helper df_win_pct_away = self.win_pct_helper(list_teams_unique=list_teams_unique, df_prepped_data=df_prepped_data_year, int_year=int_year_away) # save to object self.int_year_away = int_year_away self.df_win_pct_away = df_win_pct_away # return return self # get predicted points scored by home team when they are home def get_points_scored_by_home_team(self, str_home_team='<NAME>', int_last_n_games=4, bool_weight_opp=True, int_n_simulations=1000): # subset to year df_prepped_data_year = self.df_prepped_data[self.df_prepped_data['year']==self.int_year_home] # get all the games where the home_team was home df_prepped_data_year_home = df_prepped_data_year[(df_prepped_data_year['home_team'] == str_home_team)] # save to object at this stage so we dont have to subset again later self.df_prepped_data_year_home_copy = df_prepped_data_year_home.copy() # get n_rows int_n_rows = df_prepped_data_year_home.shape[0] # logic to prevent errors when subsetting games if int_last_n_games < int_n_rows: df_prepped_data_year_home = df_prepped_data_year_home.iloc[-int_last_n_games:] else: pass # if weighting each game by opponent win pct if bool_weight_opp: # merge with df_win_pct to get opponent win % df_prepped_data_year_home = pd.merge(left=df_prepped_data_year_home, right=self.df_win_pct_home, left_on='away_team', right_on='team', how='left') # save weights list_weights = list(df_prepped_data_year_home['win_pct']) # logic to 
catch potential errors if (np.sum(list_weights) == 0) or (not bool_weight_opp): # weight everything the same list_weights = [1 for x in df_prepped_data_year_home['win_pct']] # get median flt_home_score_avg = np.average(df_prepped_data_year_home['home_score'], weights=list_weights) # get random values from poisson distribution list_pred_home_score = list(np.random.poisson(flt_home_score_avg, int_n_simulations)) # save to object self.str_home_team = str_home_team self.int_n_simulations = int_n_simulations self.list_pred_home_score = list_pred_home_score # return return self # get predicted points scored by away team when they are away def get_points_scored_by_away_team(self, str_away_team='<NAME>', int_last_n_games=4, bool_weight_opp=True): # subset to year df_prepped_data_year = self.df_prepped_data[self.df_prepped_data['year']==self.int_year_away] # get all the games where the away team was away df_prepped_data_year_away = df_prepped_data_year[(df_prepped_data_year['away_team'] == str_away_team)] # save to object at this stage so we dont have to subset again self.df_prepped_data_year_away_copy = df_prepped_data_year_away.copy() # get n_rows int_n_rows = df_prepped_data_year_away.shape[0] # logic to prevent errors when subsetting games if int_last_n_games < int_n_rows: df_prepped_data_year_away = df_prepped_data_year_away.iloc[-int_last_n_games:] else: pass # if weighting each game by opponent win pct if bool_weight_opp: # merge with df_win_pct to get opponent win % df_prepped_data_year_away = pd.merge(left=df_prepped_data_year_away, right=self.df_win_pct_away, left_on='home_team', right_on='team', how='left') # save weights list_weights = list(df_prepped_data_year_away['win_pct']) # logic to catch potential errors if (np.sum(list_weights) == 0) or (not bool_weight_opp): # weight everything the same list_weights = [1 for x in df_prepped_data_year_away['win_pct']] # get median flt_away_score_avg = np.average(df_prepped_data_year_away['away_score'], weights=list_weights) # get random values from poisson distribution list_pred_away_score = list(np.random.poisson(flt_away_score_avg, self.int_n_simulations)) # save to object self.str_away_team = str_away_team self.list_pred_away_score = list_pred_away_score # return return self # get predicted points allowed by home team def get_points_allowed_by_home_team(self, int_last_n_games=4, bool_weight_opp=True): # get all the games where the home_team was home (df_prepped_data_year_home_copy) df_home = self.df_prepped_data_year_home_copy.copy() # get n_rows int_n_rows = df_home.shape[0] # logic to prevent errors when subsetting games if int_last_n_games < int_n_rows: df_home = df_home.iloc[-int_last_n_games:] else: pass # if weighting each game by opponent win pct if bool_weight_opp: # merge with df_win_pct to get opponent win % df_home = pd.merge(left=df_home, right=self.df_win_pct_home, left_on='away_team', right_on='team', how='left') # save weights list_weights = list(df_home['win_pct']) # logic to catch potential errors if (np.sum(list_weights) == 0) or (not bool_weight_opp): # weight everything the same list_weights = [1 for x in list_weights] # get median flt_home_score_allowed_avg = np.average(df_home['away_score'], weights=list_weights) # get random values from poisson distribution list_pred_home_score_allowed = list(np.random.poisson(flt_home_score_allowed_avg, self.int_n_simulations)) # save to object self.list_pred_home_score_allowed = list_pred_home_score_allowed # return return self # get predicted points allowed by away team def 
get_points_allowed_by_away_team(self, int_last_n_games=4, bool_weight_opp=True): # get all the games where the away team was away df_away = self.df_prepped_data_year_away_copy.copy() # get n_rows int_n_rows = df_away.shape[0] # logic to prevent errors when subsetting games if int_last_n_games < int_n_rows: df_away = df_away.iloc[-int_last_n_games:] else: pass # if weighting each game by opponent win pct if bool_weight_opp: # merge with df_win_pct to get opponent win % df_away = pd.merge(left=df_away, right=self.df_win_pct_away, left_on='home_team', right_on='team', how='left') # save weights list_weights = list(df_away['win_pct']) # logic to catch potential errors if (np.sum(list_weights) == 0) or (not bool_weight_opp): # weight everything the same list_weights = [1 for x in list_weights] # get median flt_away_score_allowed_avg = np.average(df_away['home_score'], weights=list_weights) # get random values from poisson distribution list_pred_away_score_allowed = list(np.random.poisson(flt_away_score_allowed_avg, self.int_n_simulations)) # save to object self.list_pred_away_score_allowed = list_pred_away_score_allowed # return return self # predict outcome def predict_outcome(self, bool_weight_opp=True): # put predictions into a df df_predictions = pd.DataFrame({'pred_points_scored_by_home': self.list_pred_home_score, 'pred_points_scored_by_away': self.list_pred_away_score, 'pred_points_allowed_by_home': self.list_pred_home_score_allowed, 'pred_points_allowed_by_away': self.list_pred_away_score_allowed}) # if weighting if bool_weight_opp: # get win pct for home flt_win_pct_home = self.df_win_pct_home[self.df_win_pct_home['team']==self.str_home_team]['win_pct'].iloc[0] # get win pct for away flt_win_pct_away = self.df_win_pct_away[self.df_win_pct_away['team']==self.str_away_team]['win_pct'].iloc[0] # put into list list_weights = [flt_win_pct_home, flt_win_pct_away] else: list_weights = [1,1] # home score prediction df_predictions['pred_home_score'] = df_predictions.apply(lambda x: np.average([x['pred_points_scored_by_home'], x['pred_points_allowed_by_away']], weights=list_weights), axis=1) # away score prediction df_predictions['pred_away_score'] = df_predictions.apply(lambda x:
np.average([x['pred_points_allowed_by_home'], x['pred_points_scored_by_away']], weights=list_weights)
numpy.average
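A minimal sketch of the completed numpy.average call: the two point estimates are blended with weights, so the better-supported estimate counts for more (the numbers are hypothetical).

import numpy as np

weights = [0.7, 0.4]  # hypothetical win percentages
pred = np.average([24.0, 17.0], weights=weights)  # weighted mean, about 21.45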
# -*- coding: utf-8 -*- """ Created on Thu Apr 29 21:45:02 2021 @author: <NAME> -Spatial structure index value distribution of urban streetscape """ from mayavi import mlab from tvtk.api import tvtk # python wrappers for the C++ vtk ecosystem import numpy as np from mayavi import mlab from tvtk.api import tvtk import matplotlib.pyplot as plt # only for manipulating the input image import glob,os, pickle label_mapping={ 0:"pole", 1:"slight", 2:"bboard", 3:"tlight", 4:"car", 5:"truck", 6:"bicycle", 7:"motor", 8:"bus", 9:"tsignf", 10:"tsignb", 11:"road", 12:"sidewalk", 13:"curbcut", 14:"crosspln", 15:"bikelane", 16:"curb", 17:"fence", 18:"wall", 19:"building", 20:"person", 21:"rider", 22:"sky", 23:"vege", 24:"terrain", 25:"markings", 26:"crosszeb", 27:"Nan", } label_color={ 0:(117,115,102), #"pole", 1:(212,209,156),#"slight", 2:(224,9,9),#"bboard", 3:(227,195,66),#"tlight", 4:(137,147,169),#"car", 5:(53,67,98),#"truck", 6:(185,181,51),#"bicycle", 7:(238,108,91),#"motor", 8:(247,5,5),#"bus", 9:(127,154,82),#"tsignf", 10:(193,209,167),#"tsignb", 11:(82,83,76),#"road", 12:(141,142,133),#"sidewalk", 13:(208,212,188),#"curbcut", 14:(98,133,145),#"crosspln", 15:(194,183,61),#"bikelane", 16:(141,139,115),#"curb", 17:(157,186,133),#"fence", 18:(114,92,127),#"wall", 19:(78,61,76),#"building", 20:(100,56,67),#"person", 21:(240,116,148),#"rider", 22:(32,181,191),#"sky", 23:(55,204,26),#"vege", 24:(84,97,82),#"terrain", 25:(231,24,126),#"markings", 26:(141,173,166),#"crosszeb", 27:(0,0,0),#"Nan", } def auto_sphere(image_file): # create a figure window (and scene) fig = mlab.figure(size=(600, 600)) # load and map the texture img = tvtk.JPEGReader() img.file_name = image_file texture = tvtk.Texture(input_connection=img.output_port, interpolate=1) # print(texture) # (interpolate for a less raster appearance when zoomed in) # use a TexturedSphereSource, a.k.a. getting our hands dirty R = 1 Nrad = 180 # create the sphere source with a given radius and angular resolution sphere = tvtk.TexturedSphereSource(radius=R, theta_resolution=Nrad, phi_resolution=Nrad) # print(sphere) # assemble rest of the pipeline, assign texture sphere_mapper = tvtk.PolyDataMapper(input_connection=sphere.output_port) sphere_actor = tvtk.Actor(mapper=sphere_mapper, texture=texture) fig.scene.add_actor(sphere_actor) mlab.show() def manual_sphere(image_file): # caveat 1: flip the input image along its first axis img = plt.imread(image_file) # shape (N,M,3), flip along first dim outfile = image_file.replace('.jfif', '_flipped.jpg') # flip output along first dim to get right chirality of the mapping img = img[::-1,...] 
plt.imsave(outfile, img) image_file = outfile # work with the flipped file from now on # parameters for the sphere R = 1 # radius of the sphere Nrad = 180 # points along theta and phi phi = np.linspace(0, 2 * np.pi, Nrad) # shape (Nrad,) theta = np.linspace(0, np.pi, Nrad) # shape (Nrad,) phigrid,thetagrid = np.meshgrid(phi, theta) # shapes (Nrad, Nrad) # compute actual points on the sphere x = R * np.sin(thetagrid) * np.cos(phigrid) y = R * np.sin(thetagrid) * np.sin(phigrid) z = R * np.cos(thetagrid) # create figure mlab.figure(size=(600, 600)) # create meshed sphere mesh = mlab.mesh(x,y,z) mesh.actor.actor.mapper.scalar_visibility = False mesh.actor.enable_texture = True # probably redundant assigning the texture later # load the (flipped) image for texturing img = tvtk.JPEGReader(file_name=image_file) texture = tvtk.Texture(input_connection=img.output_port, interpolate=0, repeat=0) # print(texture) mesh.actor.actor.texture = texture # tell mayavi that the mapping from points to pixels happens via a sphere mesh.actor.tcoord_generator_mode = 'sphere' # map is already given for a spherical mapping cylinder_mapper = mesh.actor.tcoord_generator # caveat 2: if prevent_seam is 1 (default), half the image is used to map half the sphere cylinder_mapper.prevent_seam = 0 # use 360 degrees, might cause seam but no fake data #cylinder_mapper.center = np.array([0,0,0]) # set non-trivial center for the mapping sphere if necessary def mpl_sphere(image_file): import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D img = plt.imread(image_file) # define a grid matching the map size, subsample along with pixels theta = np.linspace(0, np.pi, img.shape[0]) phi = np.linspace(0, 2*np.pi, img.shape[1]) print(img.shape) print(theta.shape) print(phi.shape) #''' count =180 #180 # keep 180 points along theta and phi theta_inds = np.linspace(0, img.shape[0] - 1, count).round().astype(int) phi_inds = np.linspace(0, img.shape[1] - 1, count).round().astype(int) # print(theta_inds) theta = theta[theta_inds] phi = phi[phi_inds] print(theta.shape) print(phi.shape) img = img[np.ix_(theta_inds, phi_inds)] print("_"*50) print(img.shape) #''' theta,phi = np.meshgrid(theta, phi) print(theta.shape,phi.shape) R = 1 # sphere x = R * np.sin(theta) * np.cos(phi) y = R * np.sin(theta) * np.sin(phi) z = R * np.cos(theta) # create 3d Axes fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(x.T, y.T, z.T, facecolors=img/255, cstride=1, rstride=1) # we've already pruned ourselves # make the plot more spherical ax.axis('scaled') plt.show() def spherical_segs_pts_show(label_seg_fn,label_color): from tqdm import tqdm import pickle import numpy as np from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray from PIL import Image,ImageOps fig=mlab.figure(size=(600, 600)) print(label_seg_fn) with open(label_seg_fn,'rb') as f: label_seg=pickle.load(f).numpy() print('\nseg shape={}'.format(label_seg.shape)) # define a grid matching the map size, subsample along with pixels theta=np.linspace(0, np.pi, label_seg.shape[0]) phi=np.linspace(0, 2*np.pi, label_seg.shape[1]) print("theta shape={};phi shape={}".format(theta.shape,phi.shape)) theta,phi=np.meshgrid(theta, phi) print("theta shape={};phi shape={}".format(theta.shape,phi.shape)) label_seg_color=np.array([label_color[v] for v in label_seg.flatten()]).reshape((label_seg.shape[0],label_seg.shape[1],3)) print("\nlabel_seg_color shape={}".format(label_seg_color.shape)) R=10 # sphere x=R * np.sin(theta) * np.cos(phi) y=R 
* np.sin(theta) * np.sin(phi) z=R * np.cos(theta) print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape)) mask=label_seg==22 # print(len(np.extract(mask,x.T)),len(np.extract(mask,y.T)),len(np.extract(mask,z.T)),len(np.extract(mask,label_seg_color[:,:,0]/255))) mlab.points3d(x.T, y.T, z.T, label_seg_color[:,:,0]/255,) #opacity=0.75,scale_factor=0.1 # mlab.points3d(np.extract(mask,x.T),np.extract(mask,y.T),np.extract(mask,z.T),) theta_phi=np.dstack((theta,phi)) mlab.show() def spherical_segs_object_changing(label_seg_path,label_color): from tqdm import tqdm import glob,os import pickle import numpy as np from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray from PIL import Image,ImageOps # fig=mlab.figure(size=(600, 600)) label_seg_fns=glob.glob(os.path.join(label_seg_path,'*.pkl')) # print(label_seg_fns) for label_seg_fn in tqdm(label_seg_fns): print(label_seg_fn) with open(label_seg_fn,'rb') as f: label_seg=pickle.load(f).numpy() print('\nseg shape={}'.format(label_seg.shape)) # define a grid matching the map size, subsample along with pixels theta=np.linspace(0, np.pi, label_seg.shape[0]) phi=np.linspace(0, 2*np.pi, label_seg.shape[1]) print("theta shape={};phi shape={}".format(theta.shape,phi.shape)) theta,phi=np.meshgrid(theta, phi) print("theta shape={};phi shape={}".format(theta.shape,phi.shape)) label_seg_color=np.array([label_color[v] for v in label_seg.flatten()]).reshape((label_seg.shape[0],label_seg.shape[1],3)) print("\nlabel_seg_color shape={}".format(label_seg_color.shape)) R=10 # sphere x=R * np.sin(theta) * np.cos(phi) y=R * np.sin(theta) * np.sin(phi) z=R * np.cos(theta) print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape)) mask=label_seg==22 # print(len(np.extract(mask,x.T)),len(np.extract(mask,y.T)),len(np.extract(mask,z.T)),len(np.extract(mask,label_seg_color[:,:,0]/255))) # mlab.points3d(x.T, y.T, z.T, label_seg_color[:,:,0]/255,) #opacity=0.75,scale_factor=0.1 # mlab.show() # mlab.points3d(np.extract(mask,x.T),np.extract(mask,y.T),np.extract(mask,z.T),) theta_phi=np.dstack((theta,phi)) break def fns_sort(fns_list): from pathlib import Path fns_dict={int(Path(p).stem.split('_')[-1]):p for p in fns_list} fns_dict_key=list(fns_dict.keys()) fns_dict_key.sort() fns_dict_sorted=[fns_dict[k] for k in fns_dict_key] return fns_dict_sorted def panorama_object_change(label_seg_path,label_color): from tqdm import tqdm import glob,os import pickle import numpy as np from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray from PIL import Image,ImageOps from pathlib import Path import pandas as pd from sklearn import preprocessing label_seg_fns=glob.glob(os.path.join(label_seg_path,'*.pkl')) label_seg_fns_sorted=fns_sort(label_seg_fns) pixels={} # i=0 for label_seg_fn in tqdm(label_seg_fns_sorted): # print(label_seg_fn) with open(label_seg_fn,'rb') as f: label_seg=pickle.load(f).numpy() # print('\nseg shape={}'.format(label_seg.shape)) fn_stem=Path(label_seg_fn).stem fn_key,fn_idx=fn_stem.split("_") pixels[fn_stem]=label_seg.flatten() # if i==10:break # i+=1 img_pixels_df=pd.DataFrame.from_dict(pixels,orient='index') pixels_diff=img_pixels_df.diff() pixels_diff[pixels_diff!=0]=1 # print(img_pixels_df) pixels_diff_sum=pixels_diff.sum(axis=0) pixels_diff_array=np.array(pixels_diff_sum).reshape(label_seg.shape) min_max_scaler=preprocessing.MinMaxScaler() pixels_diff_array_standardization=min_max_scaler.fit_transform(pixels_diff_array) img_object_change=Image.fromarray(np.uint8(pixels_diff_array_standardization * 255) , 'L') 
img_object_change.save('./processed data/img_object_change.jpg') with open('./processed data/pixels_diff_array_standardization.pkl','wb') as f: pickle.dump(pixels_diff_array_standardization,f) with open('./processed data/pixels_diff_array.pkl','wb') as f: pickle.dump(pixels_diff_array,f) return img_object_change,pixels_diff_array_standardization def spherical_img_pts_show(panorama_fn,FOV=False): from tqdm import tqdm import pickle,math import numpy as np from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray from PIL import Image,ImageOps import numpy.ma as ma from PIL import Image img=plt.imread(panorama_fn) print('\nseg shape={}'.format(img.shape)) # define a grid matching the map size, subsample along with pixels theta=np.linspace(0, np.pi, img.shape[0]) phi=np.linspace(0, 2*np.pi, img.shape[1]) print("theta shape={};phi shape={}".format(theta.shape,phi.shape)) theta,phi=np.meshgrid(theta, phi) theta=theta.T phi=phi.T print("theta shape={};phi shape={}".format(theta.shape,phi.shape)) theta_phi=np.dstack((theta,phi)) if FOV==True: verticalFOV_limit_ofVisual_field=[50,90-(-70)] horizontalFOV_visual_limit_field=[62,90-(-62)] horizontal_offset=0 verticalFOV_limit_ofVisual_field_radians=[math.radians(d) for d in verticalFOV_limit_ofVisual_field] horizontalFOV_visual_limit_field_radians=[math.radians(d) for d in horizontalFOV_visual_limit_field] horizontal_offset_radians=math.radians(horizontal_offset) print(verticalFOV_limit_ofVisual_field_radians,horizontalFOV_visual_limit_field_radians,horizontal_offset_radians) mask=np.bitwise_and(theta>=verticalFOV_limit_ofVisual_field_radians[0], theta<=verticalFOV_limit_ofVisual_field_radians[1]) theta=theta[mask] phi=phi[mask] img=img[mask] R=50 # sphere x=R * np.sin(theta) * np.cos(phi) y=R * np.sin(theta) * np.sin(phi) z=R * np.cos(theta) print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape)) # print(img) fig=mlab.figure(size=(600, 600),bgcolor=(1, 1, 1)) mlab.points3d(x, y, z, img/255,scale_factor=.25) #opacity=0.75,scale_factor=0.1 mlab.points3d(0, 0, 0,scale_factor=3,color=(1,0,0)) # Plot the equator and the tropiques theta_equator=np.linspace(0, 2 * np.pi, 100) veiw_scope_dic={} for i,angle in enumerate([-math.radians(70), 0, math.radians(50)]): x_equator=R * np.cos(theta_equator) * np.cos(angle) y_equator=R * np.sin(theta_equator) * np.cos(angle) z_equator=R * np.ones_like(theta_equator) * np.sin(angle) mlab.plot3d(x_equator, y_equator, z_equator, color=(0, 0, 0),opacity=0.6, tube_radius=None) veiw_scope_dic[i]=[x_equator,y_equator,z_equator] str_info={0:'lower limit of visual filed:-70',1:'Standard line of sight:0',2:'Upper limit of visual filed:+50'} for k,v in str_info.items(): mlab.text(veiw_scope_dic[k][0][0], veiw_scope_dic[k][1][0], v, z=veiw_scope_dic[k][2][0],width=0.025 * len(v), name=v,color=(0,0,0)) vertical_label_radians=np.linspace(0, np.pi,14) vertical_label_degree=["{:.2f}".format(90-math.degrees(radi)) for radi in vertical_label_radians] phi_label=0 for idx in range(len(vertical_label_radians)): theta_labe=vertical_label_radians[idx] x_label=R * np.sin(theta_labe) * np.cos(phi_label) y_label=R * np.sin(theta_labe) * np.sin(phi_label) z_label=R * np.cos(theta_labe) mlab.points3d(x_label, y_label, z_label,scale_factor=1,color=(0,0,0)) label=vertical_label_degree[idx] mlab.text(x_label, y_label, label, z=z_label,width=0.02 * len(label), name=label,color=(0,0,0)) mlab.show() def array_classifier(array,n_classes=9): import mapclassify as mc import numpy as np import pandas as pd from PIL import Image from 
sklearn import preprocessing array_shape=array.shape array_flatten=array.flatten() classifier=mc.NaturalBreaks(array_flatten,k=n_classes) print(classifier) classifications=pd.DataFrame(array).apply(classifier) classifications_array=classifications.to_numpy().reshape(array_shape) min_max_scaler=preprocessing.MinMaxScaler() classifications_array_standardization=min_max_scaler.fit_transform(classifications_array) classifications_object_change=Image.fromarray(
np.uint8(classifications_array_standardization * 255)
numpy.uint8
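A minimal sketch of the completed numpy.uint8 call: values standardised into [0, 1] are stretched to [0, 255] and cast to uint8 so PIL can build a greyscale image (the array is hypothetical).

import numpy as np
from PIL import Image

arr = np.random.rand(4, 4)                       # hypothetical values in [0, 1]
img = Image.fromarray(np.uint8(arr * 255), 'L')  # 8-bit greyscale image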
# tindar.py
from typing import Optional
from pulp import *
import numpy as np
from pathlib import Path
from custom_timer import Timer
import itertools
import json

PROJECT_DIR = str(Path(__file__).resolve().parents[1])


class Tindar:
    '''Class to solve Tindar pairing problems

    Input
    -----
    love_matrix: np.array
        square matrix indicating which person is interested
        in which other person
    tindar_problem: instance of TindarGenerator
    '''

    INIT_ERROR_MSG = "Cannot initialise with love_matrix AND tindar_problem"

    def __init__(self, love_matrix=None, tindar_problem=None):
        self.tindar_problem = tindar_problem  # stays None when a love_matrix is given

        if love_matrix is not None:
            assert tindar_problem is None, self.INIT_ERROR_MSG
            self.check_init(love_matrix)
            self.love_matrix = love_matrix
            self.n = love_matrix.shape[0]

        if tindar_problem is not None:
            assert love_matrix is None, self.INIT_ERROR_MSG
            self.love_matrix = tindar_problem.love_matrix
            self.n = tindar_problem.n
            self.connectedness = tindar_problem.connectedness
            self.p = tindar_problem.p

        self.x_names = [f"x_{i}_{j}" for i in range(self.n) for j in range(self.n)]
        self.x = [LpVariable(name=x_name, cat="Binary") for x_name in self.x_names]
        self.x_np = np.array(self.x).reshape((self.n, self.n))

    def __repr__(self):
        if self.tindar_problem is None:
            return f"Tindar with n={self.n}"
        else:
            return str(self.tindar_problem.__repr__())

    @staticmethod
    def check_init(love_matrix):
        # type check
        if not isinstance(love_matrix, np.ndarray):
            raise ValueError("love_matrix is not a numpy array")

        # shape check
        m, n = love_matrix.shape
        if m != n:
            raise ValueError(f"love_matrix is not square: love_matrix.shape"
                             f"= {love_matrix.shape}")

        # diagonal zero check
        for i in range(n):
            if love_matrix[i, i] != 0:
                raise ValueError("love_matrix diagonal contains nonzeros")

    # Symmetry constraints: if one is paired, the other is paired
    def create_symmetry_constraints(self, inplace=True):
        tups = [(i, j) for i in range(self.n) for j in range(i+1, self.n)]

        # Left-hand side
        lhs_symmetry = [
            LpAffineExpression(
                [(self.x_np[tup[0], tup[1]], 1),
                 (self.x_np[tup[1], tup[0]], -1)],
                name=f"lhs_sym_{tup[0]}_{tup[1]}"
            )
            for tup in tups
        ]

        # Constraints
        constraints_symmetry = [
            LpConstraint(
                e=lhs_s,
                sense=0,
                name=f"constraint_sym_{tups[i][0]}_{tups[i][1]}",
                rhs=0
            )
            for i, lhs_s in enumerate(lhs_symmetry)
        ]

        # Verification
        if len(constraints_symmetry) != (self.n**2-self.n)/2:
            raise Exception(
                "Symmetry constraints not constructed right:"
                f"love_matrix.shape = {self.love_matrix.shape},"
                f"len(constraints_symmetry) should be {(self.n**2-self.n)/2}"
                f", actually is {len(constraints_symmetry)}"
            )

        # Function behaviour
        if inplace:  # object is modified, no return value
            self.constraints_symmetry = constraints_symmetry
        else:  # only result is returned
            return constraints_symmetry

    # Feasibility constraints: only pairs if person likes the other
    def create_like_constraints(self, inplace=True):
        tups = [(i, j) for i in range(self.n) for j in range(self.n)]

        # Left-hand side
        lhs_like = [
            LpAffineExpression(
                [(self.x_np[tup[0], tup[1]], 1)],
                name=f"lhs_like_{tup[0]}_{tup[1]}"
            )
            for tup in tups
        ]

        # Constraints
        constraints_like = [
            LpConstraint(
                e=lhs_l,
                sense=-1,
                name=f"constraint_like_{tups[i][0]}_{tups[i][1]}",
                rhs=self.love_matrix[tups[i][0], tups[i][1]]
            )
            for i, lhs_l in enumerate(lhs_like)
        ]

        # Verification
        if len(constraints_like) != self.n**2:
            raise Exception(
                "Liking constraints not constructed right:"
                f"A.shape = {self.love_matrix.shape}, len(constraints_like)"
                f"should be {self.n**2}, actually is {len(constraints_like)}"
            )

        # Function behaviour
        if inplace:  # object is
modified, no return value self.constraints_like = constraints_like else: # only result is returned return constraints_like # Single assignment: one person can have at most one other person def create_single_assignment_constraints(self, inplace=True): # Left-hand side: rowsum <= 1 lhs_single_rowsum = [ LpAffineExpression( [(self.x_np[i, j], 1) for j in range(self.n)], name=f"lhs_single_rowsum_{i}" ) for i in range(self.n) ] # Left-hand side: colsum <= 1 lhs_single_colsum = [ LpAffineExpression( [(self.x_np[i, j], 1) for i in range(self.n)], name=f"lhs_single_colsum_{j}" ) for j in range(self.n) ] # Constraints constraints_single_rowsum = self.make_single_constraints( lhs_single_rowsum, "rowsum") constraints_single_colsum = self.make_single_constraints( lhs_single_colsum, "colsum") # Verification self.check_single_constraints(constraints_single_rowsum, "rowsum") self.check_single_constraints(constraints_single_colsum, "colsum") # Function behaviour if inplace: # object is modified, no return value self.constraints_single_rowsum = constraints_single_rowsum self.constraints_single_colsum = constraints_single_colsum else: # only result is returned return constraints_single_rowsum, constraints_single_colsum # Auxiliary functions for single assigment constraints @staticmethod def make_single_constraints(lhs_single, kind): constraints_single = [ LpConstraint( e=lhs_s, sense=-1, name=f"constraint_single_{kind}_{i}", rhs=1 ) for i, lhs_s in enumerate(lhs_single) ] return constraints_single def check_single_constraints(self, constraints_single, kind): if len(constraints_single) != self.n: raise Exception( f"Constraints single {kind} not constructed right:" f"A.shape = {self.love_matrix.shape}, " f"len(constraints_single_{kind}) should be {self.n}, " f"actually is {len(constraints_single)}" ) def create_all_constraints(self): self.create_symmetry_constraints() self.create_like_constraints() self.create_single_assignment_constraints() self.constraints_all = [ *self.constraints_symmetry, *self.constraints_like, *self.constraints_single_rowsum, *self.constraints_single_colsum ] def create_problem(self): # Initialize constraints and objective self.create_all_constraints() self.objective = LpAffineExpression([(x_i, 1) for x_i in self.x]) # Create PuLP problem self.prob_pulp = LpProblem("The_Tindar_Problem", LpMaximize) self.prob_pulp += self.objective for c in self.constraints_all: self.prob_pulp += c def write_problem(self, path=PROJECT_DIR+"/models/Tindar.lp"): self.prob_pulp.writeLP(path) def solve_problem(self, kind="pulp"): if kind == "pulp": self.prob_pulp.solve() elif kind == "heuristic": self.x_heuristic_np = np.zeros((self.n, self.n)) for i in range(self.n - 1): if self.x_heuristic_np[i, :].sum() == 0: done = False j = i + 1 while not done: mutual_interest = ( (self.love_matrix[i, j] == 1) and (self.love_matrix[j, i] == 1) ) available = (self.x_heuristic_np[j, :] == 0).all() if mutual_interest and available: self.x_heuristic_np[i, j] = 1 self.x_heuristic_np[j, i] = 1 done = True if j == self.n - 1: done = True else: j += 1 else: raise ValueError( f"kind {kind} not allowed" "choose from: pulp, heuristic" ) def solution_status(self, kind="pulp", verbose=True): if kind == "pulp": stat = LpStatus[self.prob_pulp.status] if verbose: print("Status:", stat) return stat elif kind == "heuristic": stat = "Solved (optimal unsure)" print("Heuristic always solves") return stat else: raise ValueError( f"kind {kind} not allowed" "choose from: pulp, heuristic" ) def _pulp_solution_to_np(self, pulp_vars=None): if 
pulp_vars is None: pulp_vars = self.prob_pulp.variables() solution_np = np.array( [v.value() for v in pulp_vars] ).reshape((self.n, self.n)) return solution_np def solution_vars(self, kind="pulp", verbose=True): if kind == "pulp": vars_pulp = self.prob_pulp.variables() vars_np = self._pulp_solution_to_np(vars_pulp) if verbose: print(vars_np) return vars_np elif kind == "heuristic": if verbose: print(self.x_heuristic_np) return self.x_heuristic_np def solution_obj(self, kind="pulp", verbose=True): if kind == "pulp": obj = value(self.prob_pulp.objective) elif kind == "heuristic": obj = self.x_heuristic_np.sum() if verbose: print(f"Number of lovers connected by {kind} = ", obj) return obj class TindarGenerator: '''Class to generate Tindar objects randomly n: integer number of people in the model connectedness: 1 < integer < 10 connectedness of the Tindar problem for humans, implemented as bernouilli probability for edges to be generated ''' MIN_CONNECTEDNESS = 1 MAX_CONNECTEDNESS = 10 MIN_EDGE_PROB = 0.05 MAX_EDGE_PROB = 0.75 UNIF_LOW = 0.3 UNIF_HIGH = 0.6 def __init__(self, n, connectedness=None, nan_probability=None, generation_kind="simple", attractiveness_distr="uniform", unif_low=UNIF_LOW, unif_high=UNIF_HIGH): self.check_init(n, connectedness) self.n = n self.connectedness = connectedness self.nan_probability = nan_probability self.generation_kind = generation_kind self.attractiveness_distr = attractiveness_distr self.unif_low = unif_low self.unif_high = unif_high self.create_love_matrix() def __repr__(self): return (f"Tindar problem with n={self.n}, connectedness= " f"{self.connectedness}, p={self.p}") @staticmethod def ROMANCE_LEVEL_FN(attractiveness_level): return (1/(attractiveness_level+1))**1.5 - 0.2 # Input validation @classmethod def check_init(self, n, connectedness): # n if not isinstance(n, int): raise ValueError(f"TindarGenerator init error: " f"type(n) = {type(n)}") if n <= 0: raise ValueError(f"TindarGenerator init error: " f"n={n} < 0") # connectedness if not (isinstance(connectedness, (int, float)) or connectedness is None): raise ValueError(f"TindarGenerator init error: " f"type(connectedness) = {type(connectedness)}") if connectedness is not None: if not (self.MIN_CONNECTEDNESS <= connectedness <= self.MAX_CONNECTEDNESS): raise ValueError(f"TindarGenerator init error: " f"connectedness={connectedness} not between 1 and 10") @classmethod def bernouilli_parameter(self, connectedness): diff_scaled = (connectedness-self.MIN_CONNECTEDNESS)/self.MAX_CONNECTEDNESS return (diff_scaled*self.MAX_EDGE_PROB) + self.MIN_EDGE_PROB @classmethod def _create_interesting_love_values(self, n, attractiveness_distr=None, unif_low=None, unif_high=None): # Sample attractiveness levels nu = np.random.uniform(low=unif_low, high=unif_high, size=n) nu[nu < 0] = 0 nu[nu > 1] = 1 # Calculate corresponding romance levels mu = np.array([self.ROMANCE_LEVEL_FN(n) for n in nu]) mu[mu < 0] = 0 mu[mu > 1] = 1 # Compute love interests mu_colvec = mu.reshape((-1, 1)) nu_rowvec = nu.reshape((1, -1)) love_values =
np.dot(mu_colvec, nu_rowvec)
numpy.dot
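A minimal sketch of the completed numpy.dot call: an (n, 1) column times a (1, n) row gives the (n, n) outer product, so love_values[i, j] = mu[i] * nu[j] (the values are hypothetical).

import numpy as np

mu = np.array([0.2, 0.5]).reshape((-1, 1))  # hypothetical romance levels
nu = np.array([0.4, 0.9]).reshape((1, -1))  # hypothetical attractiveness levels
love_values = np.dot(mu, nu)                # shape (2, 2) outer product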
import numpy as np import vectormath as vmath from sortedcontainers import SortedList # constants INFINITY = 2 ** 63 # integer max (python 3 has no bound) DEBUG = False ############################################################################ # sub classes for algorithm # ############################################################################ # sub triangle data (vertex indexes, coordinates, scales, precomputed Matrix) class Triangle: def __init__(self, v1, v2, v3): # 3 vertices for a trangle self.nVerts = [v1, v2, v3] self.vTriCoords = [] # 2D position (x,y) self.vScaled = np.zeros((3, 2), dtype=float) # un-scaled triangle # GMatrix: pre-computed matrices for triangle scaling step self.mF = self.mC = [[]] # simply 2D coordinate # class Vertex: # def __init__(self, x, y): # self.x, self.y = x, y class Constraint: def __init__(self, nVertex, vec): self.nVertex = nVertex self.vConstrainedPos = vec def __lt__(self, other): return self.nVertex < other.nVertex # LU-decomp, matrix and pivot class LUData: # information of LU decompositions def __init__(self, matrix, vPivots): self.mLU = matrix self.vPivots = vPivots ############################################################################## # global variables : m is member variable # @TODO make it as a class ############################################################################### m_bSetupValid = None m_mFirstMatrix = None # G' matrix m_vConstraints = SortedList() m_vInitialVerts = [] # initial positions of points m_vDeformedVerts = [] # current deformed positions of points m_vTriangles = [] # contains deformed triangles m_vVertexMap = [] # m_vVertexMap m_mHXPrime, m_mHYPrime = None, None # m_mHXPrime, m_mHYPrime m_mDX, m_mDY = None, None # m_mDX, m_mDY m_mLUDecompX, m_mLUDecompY = None, None # m_mLUDecompX, m_mLUDecompY # functions def Error(): print("ERROR") exit() def _invalidateSetup(): # global m_bSetupValid m_bSetupValid = False def _getInitialVert(nVert, Verts): ret = vmath.Vector2(float(Verts[nVert][0]), float(Verts[nVert][1])) return ret def _normalize(vec): l = vec.length return vec / l def _squared_length(vec): return vec.length * vec.length def _extractSubMatrix(mFrom, nRowOffset, nColOffset, row, col): ret = np.zeros((row, col), dtype=float) for i in range(row): for j in range(col): ret[i][j] = mFrom[i + nRowOffset][j + nColOffset] return ret #################################################################### # Static Matrices # #################################################################### # # 1. scale-free transfrom matrix # def _precomputeOrientationMatrix(): if DEBUG: print("\nprecomputeOrientationMatrix()") # m_vConstraints = shared.m_vConstraints # put constraints into vConstraintVec vConstraintVec = [] for i in range(len(m_vConstraints)): vConstraintVec.append(m_vConstraints[i]) # resize matrix and clear to zero nVerts = len(m_vDeformedVerts) G = np.zeros((nVerts * 2, nVerts * 2), dtype=float) # G' matrix in eqn (8) nConstraints = len(vConstraintVec) nFreeVerts = nVerts - nConstraints if DEBUG: print("nConstraints =", nConstraints, ", Free =", nFreeVerts) # figure out vertices ordering. 
First free vertices and then constraints nRow = 0 m_vVertexMap = np.zeros(nVerts, dtype=int) for i in range(nVerts): c = Constraint(i, [0.0, 0.0]) if m_vConstraints.count(c) > 0: continue m_vVertexMap[i] = nRow nRow += 1 if nRow != nFreeVerts: Error() for i in range(nConstraints): m_vVertexMap[vConstraintVec[i].nVertex] = nRow nRow += 1 if nRow != nVerts: Error() # test vectors gUTest = np.zeros(nVerts * 2, dtype=float) for i in range(nVerts): c = Constraint(i, [0.0, 0.0]) if m_vConstraints.count(c) > 0: continue Row = m_vVertexMap[i] gUTest[Row * 2] = m_vInitialVerts[i][0] gUTest[Row * 2 + 1] = m_vInitialVerts[i][1] for i in range(nConstraints): Row = m_vVertexMap[vConstraintVec[i].nVertex] gUTest[Row * 2] = vConstraintVec[i].vConstrainedPos[0] gUTest[Row * 2 + 1] = vConstraintVec[i].vConstrainedPos[1] # fill matrix line = 1 nTri = len(m_vTriangles) for i in range(nTri): t = m_vTriangles[i] fTriSumErr = 0 # Error of the triangles for j in range(3): fTriErr = 0 # Error of the subtriangles n0x = 2 * m_vVertexMap[t.nVerts[j]] n0y = n0x + 1 n1x = 2 * m_vVertexMap[t.nVerts[(j + 1) % 3]] n1y = n1x + 1 n2x = 2 * m_vVertexMap[t.nVerts[(j + 2) % 3]] n2y = n2x + 1 x, y = t.vTriCoords[j][0], t.vTriCoords[j][1] v0 = vmath.Vector2(float(gUTest[n0x]), float(gUTest[n0y])) v1 = vmath.Vector2(float(gUTest[n1x]), float(gUTest[n1y])) v2 = vmath.Vector2(float(gUTest[n2x]), float(gUTest[n2y])) v01 = v1 - v0 v01Perp = vmath.Vector2(v01[1], -v01[0]) vTest = v0 + x * v01 + y * v01Perp fDist = (vTest - v2).dot(vTest - v2) """ add line = 1 for debug print("debug line", line, ":", x, y) print("debug line", line, ":", v0[0], v0[1]) print("debug line", line, ":", v1[0], v1[1]) print("debug line", line, ":", v2[0], v2[1]) print("debug line", line, ":", v01[0], v01[1]) print("debug line", line, ":", v01Perp[0], v01Perp[1]) print("debug line", line, ":", vTest[0], vTest[1]) line += 1 if fDist > 0.0001: Error() """ G[n0x][n0x] += 1 - 2 * x + x * x + y * y G[n0x][n1x] += 2 * x - 2 * x * x - 2 * y * y G[n0x][n1y] += 2 * y G[n0x][n2x] += -2 + 2 * x G[n0x][n2y] += -2 * y fTriErr += (1 - 2 * x + x * x + y * y) * gUTest[n0x] * gUTest[n0x] fTriErr += (2 * x - 2 * x * x - 2 * y * y) * \ gUTest[n0x] * gUTest[n1x] fTriErr += (2 * y) * gUTest[n0x] * gUTest[n1y] fTriErr += (-2 + 2 * x) * gUTest[n0x] * gUTest[n2x] fTriErr += (-2 * y) * gUTest[n0x] * gUTest[n2y] G[n0y][n0y] += 1 - 2 * x + x * x + y * y G[n0y][n1x] += -2 * y G[n0y][n1y] += 2 * x - 2 * x * x - 2 * y * y G[n0y][n2x] += 2 * y G[n0y][n2y] += -2 + 2 * x fTriErr += (1 - 2 * x + x * x + y * y) * gUTest[n0y] * gUTest[n0y] fTriErr += (-2 * y) * gUTest[n0y] * gUTest[n1x] fTriErr += (2 * x - 2 * x * x - 2 * y * y) * \ gUTest[n0y] * gUTest[n1y] fTriErr += (2 * y) * gUTest[n0y] * gUTest[n2x] fTriErr += (-2 + 2 * x) * gUTest[n0y] * gUTest[n2y] G[n1x][n1x] += x * x + y * y G[n1x][n2x] += -2 * x G[n1x][n2y] += 2 * y fTriErr += (x * x + y * y) * gUTest[n1x] * gUTest[n1x] fTriErr += (-2 * x) * gUTest[n1x] * gUTest[n2x] fTriErr += (2 * y) * gUTest[n1x] * gUTest[n2y] G[n1y][n1y] += x * x + y * y G[n1y][n2x] += -2 * y G[n1y][n2y] += -2 * x fTriErr += (x * x + y * y) * gUTest[n1y] * gUTest[n1y] fTriErr += (-2 * y) * gUTest[n1y] * gUTest[n2x] fTriErr += (-2 * x) * gUTest[n1y] * gUTest[n2y] G[n2x][n2x] += 1 G[n2y][n2y] += 1 fTriErr += gUTest[n2x] * gUTest[n2x] + gUTest[n2y] * gUTest[n2y] fTriSumErr += fTriErr gUTemp = np.matmul(G, gUTest) fSum = gUTemp.dot(gUTest) # print("(test) Residual =", fSum) # extract G00 matrix G00 = np.zeros((2 * nFreeVerts, 2 * nFreeVerts), dtype=float) dim = 
np.shape(G00) row, col = dim[0], dim[1] G00 = _extractSubMatrix(G, 0, 0, row, col) # extract G01 and G10 matrices G01 = np.zeros((2 * nFreeVerts, 2 * nConstraints), dtype=float) dim = np.shape(G01) row, col = dim[0], dim[1] G01 = _extractSubMatrix(G, 0, 2 * nFreeVerts, row, col) G10 = np.zeros((2 * nConstraints, 2 * nFreeVerts), dtype=float) dim = np.shape(G10) row, col = dim[0], dim[1] G10 = _extractSubMatrix(G, 2 * nFreeVerts, 0, row, col) # compute GPrime = G00 + Transpose(G00) and B = G01 + Transpose(G10) eqn (8) GPrime = G00 + np.transpose(G00) B = G01 + np.transpose(G10) # invert GPrime and final result = -GPrimeInverse * B GPrimeInverse = np.linalg.inv(GPrime) mFinal = np.matmul(GPrimeInverse, B) return -mFinal # checked: gUTest, m_vVertexMap, G, G00, G01, G10, GPrime, B, GPrimeInverse, mFinal # # LUDecompostion for Scale Matrix calculation # def _LUDecompose(mMatrix, vDecomp): # return tuple(ifSquare, vDecomp) dim = np.shape(mMatrix) row, col = dim[0], dim[1] if row != col: return False, vDecomp # initialize vDecomp vDecomp = LUData(np.zeros((row, row), dtype=float), np.zeros(row, int)) vPivots = vDecomp.vPivots # need to assign value back mLUMatrix = vDecomp.mLU # need to assign value back mLUMatrix = mMatrix # scaling of each row dRowSwaps, dTemp = 1, None vScale =
np.zeros(row, dtype=float)
numpy.zeros
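A hedged cross-check for the hand-rolled LU routine above (SciPy is an assumption here; the original module imports only numpy and vectormath): scipy.linalg.lu_factor returns the same packed factors-plus-pivots representation that LUData stores.

import numpy as np
from scipy.linalg import lu_factor, lu_solve

A = np.array([[4.0, 3.0], [6.0, 3.0]])
b = np.array([1.0, 2.0])
lu, piv = lu_factor(A)          # packed L/U matrix plus pivot indices
x = lu_solve((lu, piv), b)      # solve A x = b from the factorization
assert np.allclose(A @ x, b)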
import pandas as pd import numpy as np import config class Result: """ A class used to represent a Result. Attributes ---------- ticker : sequence The stock ticker. data : dataframe The historical data associated with the ticker. strategy : Strategy An instance of the Strategy class. buy_transactions: sequence List of buy transactions. sell_transactions: sequence List of sell transactions. buy_transaction_equity: sequence List of equity values corresponding to the buy transactions. sell_transaction_equity: sequence List of equity values corresponding to the sell transactions. Performance : Performance An instance of the Performance class. transactions : numeric The required multiple of the 20D MA volume to generate a buy signal. Methods ------- performance_as_dict() Returns the performance results in a dictionary. tech_indicators() Augments the data attribute with columns for technical indicators. buy_and_sell_signals() Calculate signals where they can be vectorised. trade() Enters and exit positions based on buy/sell signals. calculate_returns() Calculate returns after the trade method has been executed. print_results() Print the performance results to the console. """ def __init__(self, ticker, strategy, raw_data): self.ticker = ticker self.data = raw_data self.strategy = strategy self.tech_indicators() self.buy_and_sell_signals() self.buy_transactions, self.sell_transactions, self.buy_transaction_equity, self.sell_transaction_equity = self.trade() self.Performance = self.calculate_returns() self.transactions = len(self.buy_transactions + self.sell_transactions) self.print_results() def performance_as_dict(self): """Returns the performance results in a dictionary. Parameters ---------- Raises ------ """ return {'ticker': self.ticker, 'strategy': "Strategy(" + str(self.strategy.required_profit) + ", " + str( self.strategy.required_pct_change_min) + ", " + str(self.strategy.required_pct_change_max) + ", " + str( self.strategy.required_volume) + ")", 'annualised_return': self.Performance.annualised_return, 'annualised_return_ref': self.Performance.annualised_return_ref, 'end_date': self.Performance.end_date, 'end_price': self.Performance.end_price, 'gain': self.Performance.gain, 'gain_ref': self.Performance.gain_ref, 'start_date': self.Performance.start_date, 'start_price': self.Performance.start_price} def tech_indicators(self): """Augments the data attribute with columns for technical indicators. 
Parameters ---------- Raises ------ """ self.data = self.data.assign(close_MA_50=self.data[["close"]].ewm(span=50).mean()) self.data = self.data.assign(close_MA_200=self.data[["close"]].ewm(span=200).mean()) self.data = self.data.assign(volume_MA_20=self.data[["volume"]].rolling(20).mean()) self.data = self.data.assign( price_change_buy=self.data['close'].pct_change().between(self.strategy.required_pct_change_min, self.strategy.required_pct_change_max)) self.data = self.data.assign( volume_change_buy=(self.data["volume"] > self.strategy.required_volume * self.data["volume_MA_20"])) # Money Flow Index (MFI) typical_price = (self.data["high"] + self.data["low"] + self.data["close"]) / 3 money_flow = typical_price * self.data["volume"] delta = money_flow - money_flow.shift(1) delta = pd.Series([0 if np.isnan(x) else x for x in delta]) positive_money_flow = pd.Series([x if x > 0 else 0 for x in delta]) negative_money_flow = pd.Series([abs(x) if x < 0 else 0 for x in delta]) positive_money_flow_sum = positive_money_flow.rolling(window=14).sum().values negative_money_flow_sum = negative_money_flow.rolling(window=14).sum().values with np.errstate(divide='ignore', invalid='ignore'): money_ratio = positive_money_flow_sum / negative_money_flow_sum money_flow_index = 100 - 100 / (1 + money_ratio) self.data = self.data.assign(MFI=money_flow_index) # Relative Strength Index (RSI) delta = self.data["close"] - self.data["close"].shift(1) delta = pd.Series([0 if np.isnan(x) else x for x in delta]) up = pd.Series([x if x > 0 else 0 for x in delta]) down = pd.Series([abs(x) if x < 0 else 0 for x in delta]) with np.errstate(divide='ignore', invalid='ignore'): rs = up.rolling(window=14).mean().values / down.rolling(window=14).mean().values relative_strength_index = 100 - 100 / (1 + rs) self.data = self.data.assign(RSI=relative_strength_index) # Stochastic Oscillator stochastic_oscillator = pd.Series( (self.data["close"] - self.data["close"].rolling(window=14, center=False).min()) / ( self.data["close"].rolling(window=14, center=False).max() - self.data["close"].rolling(window=14, center=False).min())) stochastic_oscillator = 100 * stochastic_oscillator.rolling(window=3).mean() self.data = self.data.assign(STO=stochastic_oscillator) # Bollinger Bands rolling_mean = self.data[["close"]].ewm(span=50).mean() rolling_std = self.data[["close"]].ewm(span=50).std() self.data = self.data.assign(BB_upper=rolling_mean + (rolling_std * 2)) self.data = self.data.assign(BB_lower=rolling_mean - (rolling_std * 2)) return def buy_and_sell_signals(self): """Calculate signals where they can be vectorised. Generation of sell signal requires iterating through the data which is done in the trade method. Parameters ---------- Raises ------ """ self.data = self.data.assign(buy_signal=np.nan, sell_signal=np.nan, buy_signal_date=np.nan, sell_signal_date=np.nan) buy_prices = self.data["close"].iloc[np.where(self.data["volume_change_buy"] & self.data["price_change_buy"])] buy_dates = self.data["date"].iloc[np.where(self.data["volume_change_buy"] & self.data["price_change_buy"])] self.data = self.data.assign(buy_signal=buy_prices) self.data = self.data.assign(buy_signal_date=buy_dates) return def trade(self): """Enters and exit positions based on buy/sell signals. 
Parameters ---------- Raises ------ """ buy_transactions, buy_transaction_equity, sell_transactions, sell_transaction_equity = ([] for i in range(4)) open_long_position, buy_and_hold, buy_and_hold_shares, buy_and_hold, buy_and_hold_shares, shares = ( 0, 0, 0, 0, 0, 0) buy_and_hold_position_array, open_long_position_array, strategy_equity_array, buy_and_hold_equity_array = ( np.full(len(self.data["close"].values), np.nan) for i in range(4)) # Create buy signal and buy signal dates without NaN or NaT (NaN and NaT inclusive arrays required for plots) buy_signal_array_nonan = self.data["buy_signal"].values[~np.isnan(self.data["buy_signal"].values)] buy_signal_array_dates_nonat = self.data["buy_signal_date"].values[ ~
np.isnat(self.data["buy_signal_date"].values)
numpy.isnat
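A small sketch of the NaT filtering used above: np.isnat flags Not-a-Time entries in datetime64 arrays exactly as np.isnan flags NaN in floats, so the tilde keeps only real signal dates.

import numpy as np

dates = np.array(['2021-01-04', 'NaT', '2021-01-06'], dtype='datetime64[D]')
valid = dates[~np.isnat(dates)]    # drop the NaT placeholder
print(valid)                       # ['2021-01-04' '2021-01-06']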
from skimage.feature import canny import scipy.ndimage as ndimage import cv2 import gdal import numpy as np from skimage.morphology import skeletonize from skimage import measure from skimage.measure import regionprops from multiprocessing import pool, cpu_count from multiprocessing.dummy import Pool as ThreadPool #define variables featureList=[] maxArea=0 xy=[] areaList = [] indexList =[] maskBB = [] maskHeight = [] maskLabel = [] maskStd = [] neighbours=0 regionbb=0 mask=0 regionval=0 smallerThan=0 kernel = np.ones((20,20),np.uint8) def findStdSmaller(regionIndex): global smallerThan pos = np.where(maskLabel==neighbours[regionIndex]) mask2 = (regionbb == maskLabel[pos]).astype(np.uint8)*255 mask2 = cv2.dilate(mask2,kernel,iterations = 1)>0 mask2 = np.multiply(mask2,mask>0) mask2 = np.multiply(mask2,regionval) hData = mask2[np.where(mask2>0)] if len(hData)>0: h = np.mean(hData) if h<maskHeight[pos]+2: smallerThan=smallerThan+1 def findDEMFeature(original_dem,index): global featureList,maxArea,xy,areaList,indexList,maskBB,maskHeight,maskLabel,maskStd,\ neighbours,regionbb,mask,regionval,smallerThan,kernel height,width=original_dem.shape region = regionprops(index, original_dem,cache = True) number_regions=len(region) for i in range(0,number_regions): if region[i].area>10000: areaList.append(region[i].area) indexList.append(i) maskBB.append(region[i].bbox) maskLabel.append(region[i].label) maskHeight.append(region[i].mean_intensity) xy = region[i].coords std = np.std(original_dem[xy[:,0],xy[:,1]]) maskStd.append(std) areaList = np.array(areaList) indexList = np.array(indexList) maskBB = np.array(maskBB) maskHeight = np.array(maskHeight) maskLabel = np.array(maskLabel) maskStd = np.array(maskStd) order = np.argsort(-areaList)#minus for decending areaList = areaList[order] indexList = indexList[order] maskBB = maskBB[order] maskHeight = maskHeight[order] maskLabel = maskLabel[order] maskStd = maskStd[order] for regionIndex in range(0,int(len(areaList)/10)): minr, minc, maxr, maxc = maskBB[regionIndex] extraMargin = 20 if minr-extraMargin<0: minr=0 else: minr=minr-extraMargin if minc-extraMargin<0: minc=0 else: minc=minc-extraMargin if maxr+extraMargin>height: maxr=height else: maxr=maxr+extraMargin if maxc+extraMargin>width: maxc=width else: maxc=maxc+extraMargin regionbb = index[minr:maxr,minc:maxc] mask = (regionbb == maskLabel[regionIndex]).astype(np.uint8)*255 contours = cv2.findContours(mask, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[1] holeData = [] if len(contours)-1>0: for j in range(0,len(contours)-1): cnt = contours[j+1] pos = cnt[0] area = cv2.contourArea(cnt) if area>1000: holeData.append(cv2.contourArea(contours[j+1])) if len(holeData)>0: number_holes = len(holeData) holeData = np.sort(holeData) avgHole =
np.mean(holeData, dtype=int)
numpy.mean
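A sketch of the integer-dtype mean above, with made-up hole areas: dtype sets both the accumulator and the result type. Note that NumPy 1.24 removed the np.int alias, so the builtin int (or np.int64) is the supported spelling.

import numpy as np

holeData = np.sort(np.array([1200.0, 3400.0, 2500.0]))   # hole areas in px
avgHole = np.mean(holeData, dtype=int)                   # integer accumulator and result
print(avgHole)                                           # 2366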
"""Core functionality including loading audio from disk, computing envelope, generating sounds.""" from dataclasses import dataclass from math import ceil, floor, inf from typing import Callable, List, Optional, Tuple import matplotlib.pyplot as plt import numpy as np import scipy.io.wavfile import scipy.signal import sounddevice from pyresynth import utils @dataclass class Axis: """A class for representing an axis. Constructor parameters: step: Spacing between values. start: Start value. The default value is 0. """ step: float start: float = 0 def range(self, length: int) -> np.ndarray: """Return NumPy array of values forming an axis.""" stop = self.start + length * self.step return
np.linspace(self.start, stop, length, endpoint=False)
numpy.linspace
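Why Axis.range passes endpoint=False, shown with toy values: the stop value is the first sample of the next block, so excluding it keeps consecutive ranges from overlapping.

import numpy as np

step, start, length = 0.5, 0.0, 4
axis = np.linspace(start, start + length * step, length, endpoint=False)
print(axis)   # [0.  0.5 1.  1.5] -- the stop value 2.0 is excluded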
import math import numpy as np import torch from sklearn.metrics import average_precision_score, roc_auc_score, f1_score def recall(node, rank, top_k): rank = rank[:, :top_k] recall = np.array([node[i] in a for i, a in enumerate(rank)]) recall = recall.sum() / recall.size return recall def MRR(node, rank): rank = rank.cpu() mrr = np.array([(np.where(a == node[i])) for i, a in enumerate(rank)]) mrr = (1 / (mrr + 1)).mean() return mrr def get_target(src_embedding, dest_embedding, src_batch): cos_similarity = torch.matmul(src_embedding[src_batch], dest_embedding.T) cos_similarity, idx = torch.sort(cos_similarity, descending=True) return cos_similarity, idx def eval_edge_prediction(model, neg_edge_sampler, data, n_neighbors, batch_size=200, use_recall=False): assert neg_edge_sampler.seed is not None neg_edge_sampler.reset_random_state() val_ap, val_macro_auc, val_micro_auc, val_macro_f1, val_micro_f1 = [], [], [], [], [] val_mrr, val_recall_20, val_recall_50 = [], [], [] with torch.no_grad(): model = model.eval() TEST_BATCH_SIZE = batch_size num_test_instance = len(data.sources) num_test_batch = math.ceil(num_test_instance / TEST_BATCH_SIZE) for k in range(num_test_batch): start_idx = k * TEST_BATCH_SIZE end_idx = min(num_test_instance, start_idx + TEST_BATCH_SIZE) size = end_idx - start_idx src_batch = data.sources[start_idx:end_idx] dest_batch = data.destinations[start_idx:end_idx] edge_idx_batch = data.edge_idxs[start_idx:end_idx] timestamp_batch = data.timestamps[start_idx:end_idx] _, neg_batch = neg_edge_sampler.sample(size) pos_prob, neg_prob = model.compute_edge_probabilities(source_nodes=src_batch, destination_nodes=dest_batch, negative_nodes=neg_batch, edge_times=timestamp_batch, edge_idxs=edge_idx_batch, n_neighbors=n_neighbors, is_test=False) # src_embedding = src_embedding.detach() # dest_embedding = dest_embedding.detach() src_embedding = dest_embedding = model.memory.memory pred_label = np.concatenate([(pos_prob).cpu().numpy(), (neg_prob).cpu().numpy()]) true_label = np.concatenate([np.ones(size), np.zeros(size)]) val_ap.append(average_precision_score(true_label, pred_label)) val_macro_auc.append(roc_auc_score(true_label, pred_label, average='macro')) val_micro_auc.append(roc_auc_score(true_label, pred_label, average='micro')) val_macro_f1.append(f1_score(true_label, np.array(pred_label >= 0.5, dtype=int), average='macro')) val_micro_f1.append(f1_score(true_label, np.array(pred_label >= 0.5, dtype=int), average='micro')) if use_recall: cos_similarity, dest_rank = get_target(src_embedding, dest_embedding, src_batch) cos_similarity, src_rank = get_target(dest_embedding, src_embedding, dest_batch) recall_20 = (recall(dest_batch, dest_rank, 20) + recall(src_batch, src_rank, 20)) / 2 recall_50 = (recall(dest_batch, dest_rank, 50) + recall(src_batch, src_rank, 50)) / 2 mrr = (MRR(dest_batch, dest_rank) + MRR(src_batch, src_rank)) / 2 val_mrr.append(mrr) val_recall_20.append(recall_20) val_recall_50.append(recall_50) else: val_mrr.append(0) val_recall_20.append(0) val_recall_50.append(0) return np.mean(val_ap), np.mean(val_macro_auc), np.mean(val_micro_auc), np.mean(val_macro_f1), np.mean( val_micro_f1), np.mean(val_mrr), np.mean(val_recall_20),
np.mean(val_recall_50)
numpy.mean
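A hedged aside on the aggregation above: np.mean over per-batch scores weights each batch equally, so the final, smaller batch is over-weighted. If batch sizes were tracked (an assumption; the loop does not record them), np.average removes the bias.

import numpy as np

batch_ap = np.array([0.90, 0.80, 0.60])            # per-batch scores
batch_sizes = np.array([200, 200, 50])             # last batch is short
print(np.mean(batch_ap))                           # 0.7667, unweighted
print(np.average(batch_ap, weights=batch_sizes))   # 0.8222, size-weighted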
import pytest import numpy as np from GPdoemd.design_criteria import _reshape, HR, BH, BF, AW, JR N = 15 M = 3 E = 2 mu = np.random.randn(N,M,E) s2 = 0.1 * np.random.rand(N,M,E,E) s2 += s2.transpose(0,1,3,2) + np.array([[np.eye(E)]*M]*N) noisevar = 0.1 * np.eye(E) pps = np.ones( M ) / M # Set biggest divergence mu[13] = ( 1 + np.arange(M*E).reshape((M,E)) ) * 10 s2[13] = np.array( [ 0.001 * np.eye(E)] * M ) """ TESTS """ class TestDesignCriteria: def test_HR(self): d = HR(mu,s2,noisevar,pps) assert d.shape == (N,) assert
np.argmax(d)
numpy.argmax
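A minimal sketch of the test's planted-maximum pattern: index 13 is given the largest divergence, so a correct design criterion must select it.

import numpy as np

d = np.random.rand(15)
d[13] = d.max() + 1.0        # plant the winner, as mu[13]/s2[13] do above
assert np.argmax(d) == 13    # the criterion should pick design 13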
import logging import librosa import numpy as np import pandas as pd from common.utilities import KnownRequestParseError logger = logging.getLogger(__name__) # Resample all uploaded files to this sample rate. Ideally, should match the SR used for training. SUPPORTED_SAMPLE_RATE = 22050 # Duration of a single unit of recognition. The input file will be split to chunks of this size. SECONDS_PER_CHUNK = 1.0 # Signal with RMS lower than this value will be considered silence. ABSOLUTE_SILENCE_RMS_THRESHOLD = 1e-5 # Signal with RMS lower than this percentile in the input file will be considered silence. ADAPTIVE_SILENCE_RMS_PERCENTILE = 25 def is_chunk_silent(rms_chunk, adaptive_threshold): """ Determines whether the specified audio segment is silent or not. Parameters ---------- rms_chunk : numpy.array A 1D vector of RMS values for the chunk. adaptive_threshold : float An RMS threshold below which the audio is considered silent. Returns ------- bool """ mean_rms = np.mean(rms_chunk) return mean_rms < ABSOLUTE_SILENCE_RMS_THRESHOLD or mean_rms < adaptive_threshold def featurize_chroma_chunk(chunk): """ Extract features from a chromagram segment. Parameters ---------- chunk : numpy.array A 2D array (*, 12) representing the chromagram for the chunk. Returns ------- numpy.array Extracted 1D feature vector. """ return np.mean(chunk, axis=1) def featurize_file(filename): """ Extracts audio features from the specified audio file. Parameters ---------- filename : str Path to a saved audio file. Returns ------- pandas.DataFrame A data frame with extracted audio features, one line for each SECONDS_PER_CHUNK seconds. """ try: logger.info(f'Reading audio file: "{str(filename)}"') signal, sample_rate = librosa.load(filename, sr=SUPPORTED_SAMPLE_RATE) except Exception as e: error_desc = str(e) or e.__class__.__name__ raise KnownRequestParseError('Cannot load audio file. Error: ' + error_desc) duration = len(signal) / sample_rate logger.info(f'File duration: {duration:.1f} seconds') spectrogram = np.abs(librosa.stft(signal)) spectrogram_per_second = spectrogram.shape[1] / duration logger.info(f'Spectrogram shape: {spectrogram.shape}') rms = librosa.feature.rms(S=spectrogram).T.ravel() chroma = librosa.feature.chroma_stft(S=spectrogram, sr=sample_rate) adaptive_rms_threshold = np.percentile(rms, ADAPTIVE_SILENCE_RMS_PERCENTILE) # Split RMS and Chroma arrays into equally sized chunks, each taking SECONDS_PER_CHUNK. chunk_split_points =
np.arange(0, chroma.shape[-1], spectrogram_per_second * SECONDS_PER_CHUNK)
numpy.arange
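A sketch of how the split points are likely consumed (np.split is an assumption; only the point computation appears above): truncating the float boundaries to ints chunks the frame axis into roughly one-second pieces.

import numpy as np

spectrogram_per_second = 43.07                      # frames per second, example value
n_frames = 130
split_points = np.arange(0, n_frames, spectrogram_per_second * 1.0)
chunks = np.split(np.arange(n_frames), split_points[1:].astype(int))
print([len(c) for c in chunks])                     # [43, 43, 43, 1]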
""" See main/local_motivation_bert_agg.py module doc """ import json import os import shutil import tempfile from collections import defaultdict import numpy as np from absl import flags from .. import log from ..huggingface.run_classifier import main from ..params import GLUE_TASK_NAMES from ..sync import exists, sync, simplehash from ..utils import import_matplotlib, timeit def main(prefix, cache, overwrite, task): expected_files = [ "dev_loss.pdf", "train_loss.pdf", "dev_train_gap.pdf", "train_act.pdf", "train_nrm.pdf", "dev_act.pdf", "dev_nrm.pdf", ] for f in expected_files: f = os.path.join(prefix, f) if exists(f) and not overwrite: log.info("file {} exists but --overwrite is not specified", f) return workdir = cache or "/tmp/bert_agg_{}".format(simplehash(prefix)) log.info("work dir {}", workdir) with timeit(name="load results"): sync(prefix, workdir, '--exclude', '*', '--include', '*config.json', '--include', '*summary.json', '--include', '*average_activations.npy', '--include', '*average_norms.npy') _setup_main(workdir, task) with timeit(name="saving outputs"): sync(workdir, prefix, '--exclude', '*', '--include', '*.pdf') def _setup_main(workdir, task): """ Runs locally, saving all intended output to workdir """ loss = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) # nested mapping is # eval (dev, train) -> attn (soft, topk, topk-50) -> k -> [loss] # list is over seeds # similar mapping, but only for topk # eval -> attn (soft, topk) -> k -> [marginal act/nrm] act = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) nrm = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for root, dirnames, filenames in os.walk(workdir): if not _is_resultdir(root, dirnames, filenames): continue config = _get_json(os.path.join(root, "config.json")) attn = config["attn"] k = config["k"] for folder in ["dev", "train"]: dir_summary = _get_json(os.path.join(root, folder, "summary.json")) dir_act = np.load( os.path.join(root, folder, "average_activations.npy") ) dir_nrm = np.load(os.path.join(root, folder, "average_norms.npy")) loss[folder][attn][k].append(dir_summary["eval_loss"]) act[folder][attn][k].append(dir_act.mean(2).mean(1).mean(0)) nrm[folder][attn][k].append(dir_nrm.mean(1).mean(0)) trials = set() for folder in loss: for attn in loss[folder]: for k in loss[folder][attn]: trials.add(len(loss[folder][attn][k])) loss[folder][attn][k] = np.median(loss[folder][attn][k]) act[folder][attn][k] =
np.median(act[folder][attn][k])
numpy.median
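Why the loop reduces each per-seed list with a median rather than a mean: a single diverged run cannot drag the summary.

import numpy as np

losses_per_seed = [0.41, 0.39, 2.75]   # one seed diverged
print(np.mean(losses_per_seed))        # 1.1833 -- pulled up by the outlier
print(np.median(losses_per_seed))      # 0.41   -- robust across seeds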
import numpy as np from PuzzleLib.CPU.CPUArray import CPUArray from PuzzleLib.Intel.Wrappers import DNNL def instanceNorm2d(data, scale, bias, epsilon=1e-5): batchsize = data.shape[0] if batchsize > 1: extscale = CPUArray.toDevice(np.tile(scale.data, (batchsize, 1, 1))) extbias = CPUArray.toDevice(np.tile(bias.data, (batchsize, 1, 1))) else: extscale, extbias = scale, bias indata = data.reshape(1, batchsize * data.shape[1], data.shape[2], data.shape[3]) mean = CPUArray.empty((1, indata.shape[1], 1, 1), dtype=np.float32) var = CPUArray.empty((1, indata.shape[1], 1, 1), dtype=np.float32) outdata, savemean, savevar, desc = DNNL.batchNormNd(indata, extscale, extbias, mean, var, epsilon, test=False) return outdata.reshape(data.shape), savemean, savevar, extscale, extbias, desc def instanceNorm2dBackward(grad, data, extscale, extbias, savemean, savevar, epsilon, desc, affine=True): batchsize, maps = grad.shape[:2] outgrad = grad.reshape(1, batchsize * grad.shape[1], grad.shape[2], grad.shape[3]) indata = data.reshape(1, batchsize * data.shape[1], data.shape[2], data.shape[3]) ingrad, scalegrad, biasgrad = DNNL.batchNormNdBackward( indata, outgrad, extscale, extbias, savemean, savevar, desc, epsilon ) if affine and batchsize > 1: scalegrad = np.sum(scalegrad.data.reshape(batchsize, -1), axis=0).reshape((1, maps, 1, 1)) biasgrad = np.sum(biasgrad.data.reshape(batchsize, -1), axis=0).reshape((1, maps, 1, 1)) scalegrad = CPUArray(scalegrad.shape, scalegrad.dtype, data=scalegrad, acquire=True) biasgrad = CPUArray(biasgrad.shape, biasgrad.dtype, data=biasgrad, acquire=True) return (ingrad.reshape(grad.shape), scalegrad, biasgrad) if affine else ingrad.reshape(grad.shape) def unittest(): batchsize, maps, h, w = 3, 4, 5, 5 epsilon = 1e-5 data = CPUArray.toDevice(np.random.randn(batchsize, maps, h, w).astype(np.float32)) scale = CPUArray.toDevice(np.random.randn(1, maps, 1, 1).astype(np.float32)) bias = CPUArray.toDevice(np.random.randn(1, maps, 1, 1).astype(np.float32)) outdata, savemean, savevar, extscale, extbias, desc = instanceNorm2d(data, scale, bias, epsilon) hostData = data.get().reshape(data.shape[0] * data.shape[1], -1) hostScale, hostBias = scale.get().reshape(maps, 1), bias.get().reshape(maps, 1) hostExtScale, hostExtBias = np.tile(hostScale, (batchsize, 1)),
np.tile(hostBias, (batchsize, 1))
numpy.tile
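A shape-level sketch of the tiling above: instance norm reuses the batch-norm kernel by folding the batch into the channel axis, so the per-map scale must be repeated batchsize times.

import numpy as np

batchsize, maps = 3, 4
hostScale = np.random.randn(maps, 1)                # one scale per channel map
hostExtScale = np.tile(hostScale, (batchsize, 1))
assert hostExtScale.shape == (batchsize * maps, 1)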
import numpy as np from sympy import * def Tx(x = 0): """ Translation on «x» axis. Returns Dual Quaternion in matrix form """ return np.array([[1], [0], [0], [0], [0], [0.5 * x], [0], [0]]) def symbolicTx(x = 0): """ Translation on «x» axis. Returns Dual Quaternion in matrix form """ return Matrix([[1], [0], [0], [0], [0], [0.5 * x], [0], [0]]) def Ty(y = 0): """ Translation on «y» axis. Returns Dual Quaternion in matrix form """ return np.array([[1], [0], [0], [0], [0], [0], [0.5 * y], [0]]) def symbolicTy(y = 0): """ Translation on «y» axis. Returns Dual Quaternion in matrix form """ return Matrix([[1], [0], [0], [0], [0], [0], [0.5 * y], [0]]) def Tz(z = 0): """ Translation on «z» axis. Returns Dual Quaternion in matrix form """ return np.array([[1], [0], [0], [0], [0], [0], [0], [0.5 * z]]) def symbolicTz(z = 0): """ Translation on «z» axis. Returns Dual Quaternion in matrix form """ return Matrix([[1], [0], [0], [0], [0], [0], [0], [0.5 * z]]) def Rx(x = 0): """ Rotation on «x» axis. Returns Dual Quaternion in matrix form """ return np.array([[np.cos(x / 2)], [np.sin(x / 2)], [0], [0], [0], [0], [0], [0]]) def symbolicRx(x = 0): """ Rotation on «x» axis. Returns Dual Quaternion in matrix form """ return Matrix([[cos(x / 2)], [sin(x / 2)], [0], [0], [0], [0], [0], [0]]) def Ry(y = 0): """ Rotation on «y» axis. Returns Dual Quaternion in matrix form """ return np.array([[np.cos(y / 2)], [0], [np.sin(y / 2)], [0], [0], [0], [0], [0]]) def symbolicRy(y = 0): """ Rotation on «y» axis. Returns Dual Quaternion in matrix form """ return Matrix([[cos(y / 2)], [0], [sin(y / 2)], [0], [0], [0], [0], [0]]) def Rz(z = 0): """ Rotation on «z» axis. Returns Dual Quaternion in matrix form """ return np.array([[
np.cos(z / 2)
numpy.cos
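A sketch of the half-angle convention used by these rotation quaternions: the real part of Rz is cos(z/2), its k component is sin(z/2), and the rotation part stays unit length.

import numpy as np

z = np.pi / 3
Rz = np.array([[np.cos(z / 2)], [0], [0], [np.sin(z / 2)],
               [0], [0], [0], [0]])
assert np.isclose(np.linalg.norm(Rz[:4]), 1.0)   # unit rotation quaternion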
################################################################################ # Copyright (C) 2013-2014 <NAME> # # This file is licensed under the MIT License. ################################################################################ """ Unit tests for gaussian_markov_chain module. """ import numpy as np from ..gaussian_markov_chain import GaussianMarkovChain from ..gaussian_markov_chain import VaryingGaussianMarkovChain from ..gaussian import Gaussian, GaussianMoments from ..gaussian import GaussianARD from ..gaussian import GaussianGamma from ..wishart import Wishart, WishartMoments from ..gamma import Gamma, GammaMoments from bayespy.utils import random from bayespy.utils import linalg from bayespy.utils import misc from bayespy.utils.misc import TestCase def kalman_filter(y, U, A, V, mu0, Cov0, out=None): """ Perform Kalman filtering to obtain filtered mean and covariance. The parameters of the process may vary in time, thus they are given as iterators instead of fixed values. Parameters ---------- y : (N,D) array "Normalized" noisy observations of the states, that is, the observations multiplied by the precision matrix U (and possibly other transformation matrices). U : (N,D,D) array or N-list of (D,D) arrays Precision matrix (i.e., inverse covariance matrix) of the observation noise for each time instance. A : (N-1,D,D) array or (N-1)-list of (D,D) arrays Dynamic matrix for each time instance. V : (N-1,D,D) array or (N-1)-list of (D,D) arrays Covariance matrix of the innovation noise for each time instance. Returns ------- mu : array Filtered mean of the states. Cov : array Filtered covariance of the states. See also -------- rts_smoother """ mu = mu0 Cov = Cov0 # Allocate memory for the results (N,D) = np.shape(y) X = np.empty((N,D)) CovX = np.empty((N,D,D)) # Update step for t=0 M = np.dot(np.dot(Cov, U[0]), Cov) + Cov L = linalg.chol(M) mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[0]) + mu)) Cov = np.dot(Cov, linalg.chol_solve(L, Cov)) X[0,:] = mu CovX[0,:,:] = Cov #for (yn, Un, An, Vn) in zip(y, U, A, V): for n in range(len(y)-1): #(yn, Un, An, Vn) in zip(y, U, A, V): # Prediction step mu = np.dot(A[n], mu) Cov = np.dot(np.dot(A[n], Cov), A[n].T) + V[n] # Update step M = np.dot(np.dot(Cov, U[n+1]), Cov) + Cov L = linalg.chol(M) mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[n+1]) + mu)) Cov = np.dot(Cov, linalg.chol_solve(L, Cov)) # Force symmetric covariance (for numeric inaccuracy) Cov = 0.5*Cov + 0.5*Cov.T # Store results X[n+1,:] = mu CovX[n+1,:,:] = Cov return (X, CovX) def rts_smoother(mu, Cov, A, V, removethis=None): """ Perform Rauch-Tung-Striebel smoothing to obtain the posterior. The function returns the posterior mean and covariance of each state. The parameters of the process may vary in time, thus they are given as iterators instead of fixed values. Parameters ---------- mu : (N,D) array Mean of the states from Kalman filter. Cov : (N,D,D) array Covariance of the states from Kalman filter. A : (N-1,D,D) array or (N-1)-list of (D,D) arrays Dynamic matrix for each time instance. V : (N-1,D,D) array or (N-1)-list of (D,D) arrays Covariance matrix of the innovation noise for each time instance. Returns ------- mu : array Posterior mean of the states. Cov : array Posterior covariance of the states. 
See also -------- kalman_filter """ N = len(mu) #n = N-1 # Start from the last time instance and smoothen backwards x = mu[-1,:] Covx = Cov[-1,:,:] for n in reversed(range(N-1)):#(An, Vn) in zip(reversed(A), reversed(V)): #n = n - 1 #if n <= 0: # break # The predicted value of n x_p = np.dot(A[n], mu[n,:]) Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n] # Temporary variable S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:])) # Smoothed value of n x = mu[n,:] + np.dot(S.T, x-x_p) Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S) # Force symmetric covariance (for numeric inaccuracy) Covx = 0.5*Covx + 0.5*Covx.T # Store results mu[n,:] = x Cov[n,:] = Covx return (mu, Cov) class TestGaussianMarkovChain(TestCase): def create_model(self, N, D): # Construct the model Mu = Gaussian(np.random.randn(D), np.identity(D)) Lambda = Wishart(D, random.covariance(D)) A = Gaussian(np.random.randn(D,D), np.identity(D)) V = Gamma(D,
np.random.rand(D)
numpy.random.rand
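An illustrative scalar analogue of the predict/update loop in kalman_filter above (this is the textbook form, not the module's precision-normalized implementation; all numbers are made up).

import numpy as np

a, v, r = 1.0, 0.1, 0.5              # dynamics, innovation var, obs var
mu, cov = 0.0, 1.0                   # prior mean and variance
for yn in [1.2, 0.9, 1.1]:
    mu, cov = a * mu, a * cov * a + v             # prediction step
    k = cov / (cov + r)                           # Kalman gain
    mu, cov = mu + k * (yn - mu), (1 - k) * cov   # update step
print(mu, cov)                       # filtered mean and variance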
import pyglet from pyglet.gl import * from .globs import * from .constants import * from . import config import ctypes import math from .colors import _getColor, color, blue try: import numpy npy = True numpy.seterr(divide='ignore') except: npy = False # exports __all__ = ['PImage', 'loadImage', 'image', 'get', 'setScreen', 'save', 'createImage', 'loadPixels', 'updatePixels', 'screenFilter', 'blend'] # the PImage class class PImage(object): """This basically wraps pyglet's AbstractImage with a Processing-like syntax.""" img = None # this is the actual AbstractImage def __init__(self, *args): """Either creates a new image from scratch or wraps an AbstractImage. Arguments are of the form PImage() PImage(width,height) PImage(width,height,format) PImage(img) """ if len(args) == 1 and isinstance(args[0], pyglet.image.AbstractImage): # Wraps an AbstractImage self.img = args[0] elif len(args) in (2, 3): # Creates an ImageData from width, height and type if len(args) == 2: # default w, h = args format = ARGB else: w, h, format = args data = create_string_buffer(w * h * len(format)) self.img = pyglet.image.ImageData(w, h, format, data.raw) else: assert (len(args) == 0) # Do an initial loading of the pixels[] array self.loadPixels() self.updatePixels() def loadPixels(self): """Gets the pixel data as an array of integers.""" n = self.width * self.height self.buf = self.img.get_image_data().get_data('BGRA', -self.width * 4) if npy: self.pixels = numpy.fromstring(self.buf, dtype=ctypes.c_uint) else: self.pixels = ctypes.cast(self.buf, ctypes.POINTER(ctypes.c_uint)) def filter(self, mode, *args): """Applies a filter to the image. The existant filters are: GRAY, INVERT, OPAQUE, THRESHOLD, POSTERIZE, ERODE, DILATE and BLUR. This method requires numpy.""" if not npy: raise ImportError("Numpy is required") if mode == GRAY: # Gray value = (77*(n>>16&0xff) + 151*(n>>8&0xff) + 28*(n&0xff)) >> 8 # Where n is the ARGB color of the pixel lum1 = numpy.multiply( numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff), 77) lum2 = numpy.multiply( numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff), 151) lum3 = numpy.multiply(numpy.bitwise_and(self.pixels, 0xff), 28) lum = numpy.right_shift(numpy.add(numpy.add(lum1, lum2), lum3), 8) self.pixels = numpy.bitwise_and(self.pixels, 0xff000000) self.pixels = numpy.bitwise_or(self.pixels, numpy.left_shift(lum, 16)) self.pixels = numpy.bitwise_or(self.pixels, numpy.left_shift(lum, 8)) self.pixels = numpy.bitwise_or(self.pixels, lum) elif mode == INVERT: # This is the same as applying an exclusive or with the maximum value self.pixels = numpy.bitwise_xor(self.pixels, 0xffffff) elif mode == BLUR: if not args: args = [3] # Makes the image square by adding zeros. 
# This avoids the convolution (via fourier transform multiplication) # from jumping to another extreme of the image when a border is reached if self.width > self.height: dif = self.width - self.height updif = numpy.zeros(self.width * dif / 2, dtype=numpy.uint32) downdif = numpy.zeros(self.width * (dif - dif / 2), dtype=numpy.uint32) self.pixels = numpy.concatenate((updif, self.pixels, downdif)) size = self.width elif self.width < self.height: dif = self.height - self.width leftdif = numpy.zeros(self.height * dif / 2, dtype=numpy.uint32) rightdif = numpy.zeros(self.height * (dif - dif / 2), dtype=numpy.uint32) self.pixels = self.pixels.reshape(self.height, self.width) self.pixels = numpy.transpose(self.pixels) self.pixels = self.pixels.reshape(self.width * self.height) self.pixels = numpy.concatenate( (leftdif, self.pixels, rightdif)) self.pixels = self.pixels.reshape(self.height, self.height) self.pixels = numpy.transpose(self.pixels) self.pixels = self.pixels.reshape(self.height * self.height) size = self.height else: size = self.height # Creates a gaussian kernel of the image's size _createKernel2d(args[0], size) # Divides the image's R, G and B channels, reshapes them # to square matrixes and applies two dimensional fourier transforms red = numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff) red = numpy.reshape(red, (size, size)) red = numpy.fft.fft2(red) green = numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff) green = numpy.reshape(green, (size, size)) green = numpy.fft.fft2(green) blue = numpy.bitwise_and(self.pixels, 0xff) blue = numpy.reshape(blue, (size, size)) blue = numpy.fft.fft2(blue) # Does a element-wise multiplication of each channel matrix # and the fourier transform of the kernel matrix kernel = numpy.fft.fft2(weights) red = numpy.multiply(red, kernel) green = numpy.multiply(green, kernel) blue = numpy.multiply(blue, kernel) # Reshapes them back to arrays and converts to unsigned integers red = numpy.reshape(numpy.fft.ifft2(red).real, size * size) green = numpy.reshape(numpy.fft.ifft2(green).real, size * size) blue = numpy.reshape(numpy.fft.ifft2(blue).real, size * size) red = red.astype(numpy.uint32) green = green.astype(numpy.uint32) blue = blue.astype(numpy.uint32) self.pixels = numpy.bitwise_or(numpy.left_shift(green, 8), blue) self.pixels = numpy.bitwise_or(numpy.left_shift(red, 16), self.pixels) # Crops out the zeros added if self.width > self.height: self.pixels = self.pixels[ self.width * dif / 2:size * size - self.width * ( dif - dif / 2)] elif self.width < self.height: self.pixels = numpy.reshape(self.pixels, (size, size)) self.pixels = numpy.transpose(self.pixels) self.pixels = numpy.reshape(self.pixels, size * size) self.pixels = self.pixels[ self.height * dif / 2:size * size - self.height * ( dif - dif / 2)] self.pixels = numpy.reshape(self.pixels, (self.width, self.height)) self.pixels = numpy.transpose(self.pixels) self.pixels = numpy.reshape(self.pixels, self.height * self.width) elif mode == OPAQUE: # This is the same as applying an bitwise or with the maximum value self.pixels = numpy.bitwise_or(self.pixels, 0xff000000) elif mode == THRESHOLD: # Maximum = max((n & 0xff0000) >> 16, max((n & 0xff00)>>8, (n & 0xff))) # Broken down to Maximum = max(aux,aux2) # The pixel will be white if its maximum is greater than the threshold # value, and black if not. This was implemented via a boolean matrix # multiplication. 
if not args: args = [0.5] thresh = args[0] * 255 aux = numpy.right_shift(numpy.bitwise_and(self.pixels, 0xff00), 8) aux = numpy.maximum(aux, numpy.bitwise_and(self.pixels, 0xff)) aux2 = numpy.right_shift(numpy.bitwise_and(self.pixels, 0xff0000), 16) boolmatrix = numpy.greater_equal(numpy.maximum(aux, aux2), thresh) self.pixels.fill(0xffffff) self.pixels = numpy.multiply(self.pixels, boolmatrix) elif mode == POSTERIZE: # New channel = ((channel*level)>>8)*255/(level-1) if not args: args = [8] levels1 = args[0] - 1 rlevel = numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff) glevel = numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff) blevel = numpy.bitwise_and(self.pixels, 0xff) rlevel = numpy.right_shift(numpy.multiply(rlevel, args[0]), 8) rlevel = numpy.divide(numpy.multiply(rlevel, 255), levels1) glevel = numpy.right_shift(numpy.multiply(glevel, args[0]), 8) glevel = numpy.divide(numpy.multiply(glevel, 255), levels1) blevel = numpy.right_shift(numpy.multiply(blevel, args[0]), 8) blevel = numpy.divide(numpy.multiply(blevel, 255), levels1) self.pixels = numpy.bitwise_and(self.pixels, 0xff000000) self.pixels = numpy.bitwise_or(self.pixels, numpy.left_shift(rlevel, 16)) self.pixels = numpy.bitwise_or(self.pixels, numpy.left_shift(glevel, 8)) self.pixels = numpy.bitwise_or(self.pixels, blevel) elif mode == ERODE: # Checks the pixels directly above, under and to the left and right # of each pixel of the image. If it has a greater luminosity, then # the center pixel receives its color colorOrig = numpy.array(self.pixels) colOut = numpy.array(self.pixels) colLeft = numpy.roll(colorOrig, 1) colRight = numpy.roll(colorOrig, -1) colUp = numpy.roll(colorOrig, self.width) colDown = numpy.roll(colorOrig, -self.width) currLum1 = numpy.bitwise_and(numpy.right_shift(colorOrig, 16), 0xff) currLum1 = numpy.multiply(currLum1, 77) currLum2 = numpy.bitwise_and(numpy.right_shift(colorOrig, 8), 0xff) currLum2 = numpy.multiply(currLum2, 151) currLum3 = numpy.multiply(numpy.bitwise_and(colorOrig, 0xff), 28) currLum = numpy.add(numpy.add(currLum1, currLum2), currLum3) lumLeft1 = numpy.bitwise_and(numpy.right_shift(colLeft, 16), 0xff) lumLeft1 = numpy.multiply(lumLeft1, 77) lumLeft2 = numpy.bitwise_and(numpy.right_shift(colLeft, 8), 0xff) lumLeft2 = numpy.multiply(lumLeft2, 151) lumLeft3 = numpy.multiply(numpy.bitwise_and(colLeft, 0xff), 28) lumLeft = numpy.add(numpy.add(lumLeft1, lumLeft2), lumLeft3) lumRight1 = numpy.bitwise_and(numpy.right_shift(colRight, 16), 0xff) lumRight1 = numpy.multiply(lumRight1, 77) lumRight2 = numpy.bitwise_and(numpy.right_shift(colRight, 8), 0xff) lumRight2 = numpy.multiply(lumRight2, 151) lumRight3 = numpy.multiply(numpy.bitwise_and(colRight, 0xff), 28) lumRight = numpy.add(numpy.add(lumRight1, lumRight2), lumRight3) lumDown1 = numpy.bitwise_and(numpy.right_shift(colDown, 16), 0xff) lumDown1 = numpy.multiply(lumDown1, 77) lumDown2 = numpy.bitwise_and(numpy.right_shift(colDown, 8), 0xff) lumDown2 = numpy.multiply(lumDown2, 151) lumDown3 = numpy.multiply(numpy.bitwise_and(colDown, 0xff), 28) lumDown = numpy.add(numpy.add(lumDown1, lumDown2), lumDown3) lumUp1 = numpy.bitwise_and(numpy.right_shift(colUp, 16), 0xff) lumUp1 = numpy.multiply(lumUp1, 77) lumUp2 = numpy.bitwise_and(numpy.right_shift(colUp, 8), 0xff) lumUp2 = numpy.multiply(lumUp2, 151) lumUp3 = numpy.multiply(numpy.bitwise_and(colUp, 0xff), 28) lumUp = numpy.add(numpy.add(lumUp1, lumUp2), lumUp3) numpy.putmask(colOut, lumLeft > currLum, colLeft) numpy.putmask(currLum, lumLeft > currLum, lumLeft) 
numpy.putmask(colOut, lumRight > currLum, colRight) numpy.putmask(currLum, lumRight > currLum, lumRight) numpy.putmask(colOut, lumUp > currLum, colUp) numpy.putmask(currLum, lumUp > currLum, lumUp) numpy.putmask(colOut, lumDown > currLum, colDown) numpy.putmask(currLum, lumDown > currLum, lumDown) self.pixels = colOut elif mode == DILATE: # Checks the pixels directly above, under and to the left and right # of each pixel of the image. If it has a lesser luminosity, then # the center pixel receives its color colorOrig = numpy.array(self.pixels) colOut = numpy.array(self.pixels) colLeft = numpy.roll(colorOrig, 1) colRight = numpy.roll(colorOrig, -1) colUp = numpy.roll(colorOrig, self.width) colDown = numpy.roll(colorOrig, -self.width) currLum1 = numpy.bitwise_and(numpy.right_shift(colorOrig, 16), 0xff) currLum1 = numpy.multiply(currLum1, 77) currLum2 = numpy.bitwise_and(numpy.right_shift(colorOrig, 8), 0xff) currLum2 = numpy.multiply(currLum2, 151) currLum3 = numpy.multiply(numpy.bitwise_and(colorOrig, 0xff), 28) currLum = numpy.add(numpy.add(currLum1, currLum2), currLum3) lumLeft1 = numpy.bitwise_and(numpy.right_shift(colLeft, 16), 0xff) lumLeft1 = numpy.multiply(lumLeft1, 77) lumLeft2 = numpy.bitwise_and(numpy.right_shift(colLeft, 8), 0xff) lumLeft2 = numpy.multiply(lumLeft2, 151) lumLeft3 = numpy.multiply(numpy.bitwise_and(colLeft, 0xff), 28) lumLeft = numpy.add(numpy.add(lumLeft1, lumLeft2), lumLeft3) lumRight1 = numpy.bitwise_and(numpy.right_shift(colRight, 16), 0xff) lumRight1 = numpy.multiply(lumRight1, 77) lumRight2 = numpy.bitwise_and(numpy.right_shift(colRight, 8), 0xff) lumRight2 = numpy.multiply(lumRight2, 151) lumRight3 = numpy.multiply(numpy.bitwise_and(colRight, 0xff), 28) lumRight = numpy.add(numpy.add(lumRight1, lumRight2), lumRight3) lumDown1 = numpy.bitwise_and(numpy.right_shift(colDown, 16), 0xff) lumDown1 = numpy.multiply(lumDown1, 77) lumDown2 = numpy.bitwise_and(numpy.right_shift(colDown, 8), 0xff) lumDown2 = numpy.multiply(lumDown2, 151) lumDown3 = numpy.multiply(numpy.bitwise_and(colDown, 0xff), 28) lumDown = numpy.add(numpy.add(lumDown1, lumDown2), lumDown3) lumUp1 = numpy.bitwise_and(numpy.right_shift(colUp, 16), 0xff) lumUp1 = numpy.multiply(lumUp1, 77) lumUp2 = numpy.bitwise_and(numpy.right_shift(colUp, 8), 0xff) lumUp2 =
numpy.multiply(lumUp2, 151)
numpy.multiply
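A sketch of the fixed-point luminance used throughout these filters: 77, 151 and 28 are 0.299, 0.587 and 0.114 scaled by 256 (they sum to 256), so the final right-shift by 8 divides the scale back out.

import numpy as np

pixels = np.array([0x00FF8040], dtype=np.uint32)        # ARGB: R=0xff, G=0x80, B=0x40
r = np.bitwise_and(np.right_shift(pixels, 16), 0xff)
g = np.bitwise_and(np.right_shift(pixels, 8), 0xff)
b = np.bitwise_and(pixels, 0xff)
lum = np.right_shift(77 * r + 151 * g + 28 * b, 8)
print(lum)                                              # [159], close to 0.299R + 0.587G + 0.114B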
import os import logging import numpy as np from PIL import Image from PIL import ImageOps os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import tensorflow as tf tf.get_logger().setLevel(logging.ERROR) from tensorflow.keras.utils import Sequence, to_categorical from augmentation import augmentations ########################################################################## class DataGenerator(Sequence): def __init__(self, data, labels, img_dim=(32, 32,3), batch_size=32, num_classes=10, shuffle=True, jsd=True ): self.data = data self.labels = labels self.img_dim = img_dim self.batch_size = batch_size self.num_classes = num_classes self.shuffle = shuffle self.jsd = jsd self.augmentations = augmentations self.on_epoch_end() def on_epoch_end(self): self.indices = np.arange(len(self.data)) if self.shuffle: np.random.shuffle(self.indices) def apply_op(self, image, op, severity): image = np.clip(image * 255., 0, 255).astype(np.uint8) pil_img = Image.fromarray(image) # Convert to PIL.Image pil_img = op(pil_img, severity) return np.asarray(pil_img).astype(np.float32) / 255. def augment_and_mix(self, image, severity=3, width=3, depth=-1, alpha=1.): """Perform AugMix augmentations and compute mixture. Args: image: Raw input image as ndarray shape (h, w, c) severity: Severity of underlying augmentation operators (1-10). width: Width of augmentation chain depth: Depth of augmentation chain. -1 or (1, 3) alpha: Probability coefficient for Beta and Dirichlet distributions. Returns: mixed: Augmented and mixed image. """ ws = np.random.dirichlet([alpha] * width).astype(np.float32) m = np.float32(np.random.beta(alpha, alpha)) mix = np.zeros_like(image).astype(np.float32) for i in range(width): image_aug = image.copy() depth = depth if depth > 0 else
np.random.randint(1, 4)
numpy.random.randint
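A compact sketch of the AugMix sampling step above: Dirichlet weights mix the width augmentation chains, a Beta draw mixes the result with the clean image, and depth=-1 means each chain samples its own length from 1 to 3.

import numpy as np

width, alpha = 3, 1.0
ws = np.random.dirichlet([alpha] * width).astype(np.float32)  # chain weights, sum to 1
m = np.float32(np.random.beta(alpha, alpha))                  # clean-vs-augmented weight
depth = np.random.randint(1, 4)                               # ops per chain, 1..3
assert np.isclose(ws.sum(), 1.0, atol=1e-5) and 1 <= depth <= 3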
import numpy as np # numerical tools from scipy import integrate from scipy import interpolate c_light=299792.458#in km/s #Find nearest value def find_nearest(array,value): idx = (np.abs(array-value)).argmin() return array[idx] #### DATA SN def get_SN_info(targetname): data_sn=np.loadtxt('Info_SNe_KAIT.txt',usecols=[1,2,3,4,5,6,7]).transpose() name_SN_kait=np.array(np.genfromtxt('Info_SNe_KAIT.txt',usecols=[0],dtype='str')) ind_SN=np.where(np.array(name_SN_kait)==targetname)[0][0] A_V=data_sn[0][ind_SN] z_hel=data_sn[1][ind_SN]*1.0/c_light err_z_hel=data_sn[2][ind_SN]*1.0/c_light JD_explo=data_sn[3][ind_SN] err_JD_explo=data_sn[4][ind_SN] z_cmb=data_sn[5][ind_SN]*1.0/c_light err_z_cmb=data_sn[6][ind_SN]*1.0/c_light return A_V,z_hel,err_z_hel,JD_explo,err_JD_explo,z_cmb,err_z_cmb #Get SN photometry def get_sn(targetname): data_sn=open('Nat_KAIT/%s.txt'%targetname,'r') lines = data_sn.readlines() fields = lines[0].split() ind_B=np.where(np.array(fields)=='B')[0][0] ind_V=np.where(np.array(fields)=='V')[0][0] ind_R=np.where(np.array(fields)=='R')[0][0] ind_I=np.where(np.array(fields)=='I')[0][0] MJD = {} mags = {} emags = {} tel = {} for i in range(4): this_filter = ['B','V','R','I'] MJD[this_filter[i]] = [] mags[this_filter[i]] = [] emags[this_filter[i]] = [] tel[this_filter[i]] = [] for j in range(np.size(lines)): if (j!=0): if ((lines[j].split()[ind_B+1])<'0.8') and ((lines[j].split()[ind_B+1])!='NaN') and ((lines[j].split()[0][0])!='#'): mags['B'].append(float(lines[j].split()[ind_B])) emags['B'].append(float(lines[j].split()[ind_B+1])) MJD['B'].append(float(lines[j].split()[1])) tel['B'].append(lines[j].split()[3]) if ((lines[j].split()[ind_V+1])<'0.8') and ((lines[j].split()[ind_V+1])!='NaN')and ((lines[j].split()[0][0])!='#'): mags['V'].append(float(lines[j].split()[ind_V])) emags['V'].append(float(lines[j].split()[ind_V+1])) MJD['V'].append(float(lines[j].split()[1])) tel['V'].append(lines[j].split()[3]) if ((lines[j].split()[ind_R+1])<'0.8') and ((lines[j].split()[ind_R+1])!='NaN') and ((lines[j].split()[0][0])!='#'): mags['R'].append(float(lines[j].split()[ind_R])) emags['R'].append(float(lines[j].split()[ind_R+1])) MJD['R'].append(float(lines[j].split()[1])) tel['R'].append(lines[j].split()[3]) if ((lines[j].split()[ind_I+1])<'0.8') and ((lines[j].split()[ind_I+1])!='NaN') and ((lines[j].split()[0][0])!='#'): mags['I'].append(float(lines[j].split()[ind_I])) emags['I'].append(float(lines[j].split()[ind_I+1])) MJD['I'].append(float(lines[j].split()[1])) tel['I'].append(lines[j].split()[3]) for f in MJD: MJD[f],mags[f],emags[f],tel[f]=zip(*sorted(zip(MJD[f],mags[f],emags[f],tel[f]))) MJD[f] = np.array(MJD[f]) mags[f] = np.array(mags[f]) emags[f] = np.array(emags[f]) tel[f] = np.array(tel[f]) return MJD,mags,emags,tel #Linear interpolation of the magnitude def inter_mag(MJD,mags,emags): B_band=interpolate.interp1d(MJD['B'],mags['B']) B_band_plus=interpolate.interp1d(MJD['B'],mags['B']+emags['B']) V_band=interpolate.interp1d(MJD['V'],mags['V']) V_band_plus=interpolate.interp1d(MJD['V'],mags['V']+emags['V']) if np.size(MJD['R'])>0: R_band=interpolate.interp1d(MJD['R'],mags['R']) R_band_plus=interpolate.interp1d(MJD['R'],mags['R']+emags['R']) else: R_band=[] R_band_plus=[] I_band=interpolate.interp1d(MJD['I'],mags['I']) I_band_plus=interpolate.interp1d(MJD['I'],mags['I']+emags['I']) return B_band,B_band_plus,V_band,V_band_plus,R_band,R_band_plus,I_band,I_band_plus #Derive for each CSP filter the effective wavelength def effective_wavelength_csp(lam_spec,flux_spec,filter_name): ### Each 
transmission function ########### trans_u=np.loadtxt('Filters/CSP/u_swope.txt') lambda_u=trans_u[:,0] s_u=trans_u[:,1] trans_g=np.loadtxt('Filters/CSP/g_swope.txt') lambda_g=trans_g[:,0] s_g=trans_g[:,1] trans_r=np.loadtxt('Filters/CSP/r_swope.txt') lambda_r=trans_r[:,0] s_r=trans_r[:,1] trans_i=np.loadtxt('Filters/CSP/i_swope.txt') lambda_i=trans_i[:,0] s_i=trans_i[:,1] trans_V=np.loadtxt('Filters/CSP/V_swope.txt') lambda_V=trans_V[:,0] s_V=trans_V[:,1] trans_B=np.loadtxt('Filters/CSP/B_swope.txt') lambda_B=trans_B[:,0] s_B=trans_B[:,1] F_u_func=interpolate.interp1d(lambda_u,s_u) #interpolation Filtre u F_B_func=interpolate.interp1d(lambda_B,s_B) #interpolation Filtre B F_V_func=interpolate.interp1d(lambda_V,s_V) #interpolation Filtre V F_g_func=interpolate.interp1d(lambda_g,s_g) F_r_func=interpolate.interp1d(lambda_r,s_r) #interpolation Filtre t F_i_func=interpolate.interp1d(lambda_i,s_i) #interpolation Filtre i N_pt=3000 lambda_u=np.linspace(min(lambda_u),max(lambda_u),N_pt) lambda_B=np.linspace(min(lambda_B),max(lambda_B),N_pt) lambda_V=np.linspace(min(lambda_V),max(lambda_V),N_pt) lambda_g=np.linspace(min(lambda_g),max(lambda_g),N_pt) lambda_r=np.linspace(min(lambda_r),max(lambda_r),N_pt) lambda_i=np.linspace(min(lambda_i),max(lambda_i),N_pt) if filter_name==str('u'): F_filter_func=interpolate.interp1d(lambda_u,F_u_func(lambda_u)) #interpolation Filtre B lam_filter=lambda_u if filter_name==str('B'): F_filter_func=interpolate.interp1d(lambda_B,F_B_func(lambda_B)) #interpolation Filtre B lam_filter=lambda_B if filter_name==str('g'): F_filter_func=interpolate.interp1d(lambda_g,F_g_func(lambda_g)) lam_filter=lambda_g if filter_name==str('V'): F_filter_func=interpolate.interp1d(lambda_V,F_V_func(lambda_V)) #interpolation Filtre V lam_filter=lambda_V if filter_name==str('r'): F_filter_func=interpolate.interp1d(lambda_r,F_r_func(lambda_r)) #interpolation Filtre r lam_filter=lambda_r if filter_name==str('i'): F_filter_func=interpolate.interp1d(lambda_i,F_i_func(lambda_i)) #interpolation Filtre i lam_filter=lambda_i # interpolation spectre F_spec=interpolate.interp1d(lam_spec,flux_spec) # New wavelength vector with wavelength of filter + spectrum wavelength_to_interpolate=np.concatenate([lam_spec,lam_filter]) # Sort the wavelength wavelength_to_interpolate.sort() # We select only the wavelenght in the filter wavelength_to_interpolate_2=wavelength_to_interpolate[(wavelength_to_interpolate>min(lam_filter)) & (wavelength_to_interpolate<max(lam_filter))] # We calculate the filter response interpolate_filter_response=F_filter_func(wavelength_to_interpolate_2) # We calculate SEDter SED_inside_filter=F_spec(wavelength_to_interpolate_2) # num=f*s*lambda num=SED_inside_filter*interpolate_filter_response*wavelength_to_interpolate_2*wavelength_to_interpolate_2 # num=f*s dem=SED_inside_filter*interpolate_filter_response*wavelength_to_interpolate_2 # integral de num / integral de dem lambda_eff_filter=np.trapz(num)*1.0/np.trapz(dem) return lambda_eff_filter def effective_wavelength_KAIT(lam_spec,flux_spec,filter_name): ### KAIT 2 ########### trans_B_kait2=np.loadtxt('Filters/KAIT_NICKEL/B_kait2.txt') lambda_B_kait2=trans_B_kait2[:,0] s_B_kait2=trans_B_kait2[:,1] trans_V_kait2=np.loadtxt('Filters/KAIT_NICKEL/V_kait2.txt') lambda_V_kait2=trans_V_kait2[:,0] s_V_kait2=trans_V_kait2[:,1] trans_R_kait2=np.loadtxt('Filters/KAIT_NICKEL/R_kait2.txt') lambda_R_kait2=trans_R_kait2[:,0] s_R_kait2=trans_R_kait2[:,1] trans_I_kait2=
np.loadtxt('Filters/KAIT_NICKEL/I_kait2.txt')
numpy.loadtxt
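A self-contained sketch of the filter-file format these loadtxt calls expect (the in-memory file stands in for the real two-column wavelength/transmission tables on disk).

import io
import numpy as np

fake_filter = io.StringIO("7000 0.10\n7500 0.85\n8000 0.20\n")
trans = np.loadtxt(fake_filter)
lambda_I, s_I = trans[:, 0], trans[:, 1]   # wavelength and transmission columns
assert trans.shape == (3, 2)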
import numpy as np import time, sys from mdfmodels import mdfmodels from schwimmbad import MultiPool import functools grid_logp = np.arange(-4,0.01,0.025) grid_feh0 = np.arange(-4,0.01,0.025) grid_M =
np.arange(1,30.1,0.2)
numpy.arange
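A quick check of the grid construction above: the stop values 0.01 and 30.1 are deliberately placed just past the intended endpoints so arange, which excludes its stop, still reaches them up to float rounding.

import numpy as np

grid_logp = np.arange(-4, 0.01, 0.025)
grid_M = np.arange(1, 30.1, 0.2)
print(grid_logp.size, grid_M.size)   # 161 and 146 grid points
print(grid_logp[-1], grid_M[-1])     # ~0.0 and ~30.0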
import torch import numpy import torch.nn.functional as F import pickle import sys class EnergyAttack(): def __init__(self, step_size, epsilon, perturb_steps, random_start=None): self.step_size = step_size self.epsilon = epsilon self.perturb_steps = perturb_steps self.random_start = random_start self.consumed_steps = [] self.verbosed = False @property def p(self): return int((self.basis.shape[0] // 3) ** 0.5 + 0.5) def do_clamp(self, x_adv, x): x_adv = torch.min(torch.max(x_adv, x - self.epsilon), x + self.epsilon) x_adv = torch.clamp(x_adv, 0.0, 1.0) return x_adv def generate_new(self, x_adv, x, step): n_pert = self.n_pert # return self.do_clamp(x_adv + torch.sign(torch.randn_like(x_adv)) * self.epsilon / 2, x) noanneal = any('ea:annealoff' in x for x in sys.argv) if noanneal: if not self.verbosed: print("Variant: anneal off") self.verbosed = True po = 1.0 else: po = 0.5 * step / self.perturb_steps # po = 1.0 * step / self.perturb_steps k = numpy.random.choice( len(self.eigv), size=[len(x)], p=(self.eigv ** po) / (self.eigv ** po).sum() ) directions = self.basis.T[k] sp_dir =
numpy.zeros(x_adv.shape)
numpy.zeros
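A toy sketch (all shapes assumed, none taken from the repository) of why the perturbation buffer starts at zero: one sampled basis direction per image is written into an otherwise-empty tensor before clamping.

import numpy as np

batch, c, p = 2, 3, 8                          # illustrative sizes only
directions = np.random.randn(batch, c * p * p) # one sampled direction per image
sp_dir = np.zeros((batch, c, p, p))            # empty perturbation buffer
sp_dir += directions.reshape(batch, c, p, p)
assert sp_dir.shape == (batch, c, p, p)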
# -*- coding: utf-8 -*- """ Created on Sun Oct 28 22:21:18 2018 @author: steven image_gen performs color/derivative thresholding and applies perspective transform """ import numpy as np import cv2 import pickle import glob import matplotlib.pyplot as plt import matplotlib.image as mpimg # Read in saved objpoints and imagepoints dist_pickle = pickle.load(open('./calibration_pickle.p','rb')) mtx = dist_pickle["mtx"] dist = dist_pickle['dist'] # Define a function that applies Sobel x and y, # then computes the direction of the gradient # and applies a threshold. def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)): gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel) sobely = cv2.Sobel(gray,cv2.CV_64F, 0, 1, ksize = sobel_kernel) grad_dir = np.arctan2(np.absolute(sobely),np.absolute(sobelx)) binary_output = np.zeros_like(grad_dir) binary_output[(grad_dir >= thresh[0]) & \ (grad_dir <= thresh[1])] =1 return binary_output # Define a function that applies Sobel x or y, # then takes an absolute value and applies a threshold. def abs_sobel_thresh(img, orient='x', sobel_kernel=3,thresh=(0,255)): gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) if orient == 'x': sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel) elif orient == 'y': sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel) else: pass abs_sobel = np.absolute(sobel) scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel)) binary_output = np.zeros_like(scaled_sobel) binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1 return binary_output def img_masked_trapz(img,bottom_left=(0,1280),top_left=(0,0),top_right=(720,0),bottom_right=(720,1280)): gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) #grayscale conversion mask =
np.zeros_like(gray)
numpy.zeros_like
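A sketch of the region-of-interest masking this helper sets up (the fill step is an assumption; only the zeros_like allocation appears above, and the vertex values here are illustrative).

import numpy as np
import cv2

gray = np.zeros((720, 1280), dtype=np.uint8)   # stand-in grayscale frame
mask = np.zeros_like(gray)                     # same shape and dtype as the image
vertices = np.array([[(0, 720), (550, 450),
                      (730, 450), (1280, 720)]], dtype=np.int32)
cv2.fillPoly(mask, vertices, 255)              # paint the trapezoidal region
masked = cv2.bitwise_and(gray, mask)           # keep pixels inside the trapezoid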
""" Utility functions for SRSF Manipulations moduleauthor:: <NAME> <<EMAIL>> """ import numpy as np import time from scipy.integrate import trapz, cumtrapz from scipy.interpolate import UnivariateSpline from numpy.linalg import norm class rlbfgs: r""" This class provides alignment methods for functional data using the SRVF framework using the Riemannian limited memory BFGS solver. The solver is designed to operate on the positive orthant of the unit hypersphere in :math:`L^2([0,1],R)`. The set of all functions :math:`h=\sqrt{\dot{\gamma}}`, where :math:`\gamma` is a diffeomorphism, is that manifold. The inputs q1 and q2 are the square root velocity functions of curves in R^n to be aligned. Here, q2 will be aligned to q1. Usage: obj = rlbfgs(q1,q2,t) :param q1: (M,N): matrix defining srvf of dimension M of N samples :param q2: (M,N): matrix defining srvf of dimension M of N samples :param t: time vector of length N :param q2Opt: optimally aligned srvf :param gammaOpt: optimal warping function :param cost: final cost :param info: dictionary consisting of info about the iterations <NAME>, <NAME>, <NAME>, <NAME>. "Riemannian Optimization for Elastic Shape Analysis", Short version, The 21st International Symposium on Mathematical Theory of Networks and Systems (MTNS 2014). Code based on rlbfgs.m in Manopt: www.manopt.org Author : <NAME> (JDT) <jdtuck AT sandia.gov> Date : 27-Oct-2020 """ def __init__(self, q1, q2, t): """ Construct an instance of the rlbfgs class :param q1: (M,N): matrix defining srvf of dimension M of N samples :param q2: (M,N): matrix defining srvf of dimension M of N samples :param t: time vector of length N """ self.t = t self.T = t.shape[0] if q1.ndim > 1: self.q1 = q1 self.q2 = q2 else: self.q1 = q1/norm(q1) self.q2 = q2/norm(q2) def solve(self, maxiter=30, verb=0): """ Run solver :param maxiter: maximum number of interations :param verb: integer used to tune the amount of output """ # @todo add options to parameters if needed # terminates if the norm of the gradient drops below this tolgradnorm = 1e-3 # terminates if more than seconds elapsed maxtime = np.inf # minimum norm of tangent vector that points from current to next minstepsize = 1e-10 # number of previous iterations the program remembers memory = 30 memory = max(memory,0) # the cautious step needs a real function that has value 0 at t=0 strict_inc_func = lambda t : 1e-4*t ls_max_steps = 25 options = {"tolgradnorm": tolgradnorm, "maxtime": maxtime, "memory":memory, "strict_inc_func":strict_inc_func, "ls_max_steps":ls_max_steps, "maxiter":maxiter, "minstepsize":minstepsize} timetic = time.time() ## Initialization of Variables htilde = np.ones(self.T) q2tilde = self.q2 # number of iterations since last restart j = 0 # Total number of BFGS iterations k = 0 # list to store step vectors which point from h_id to h_{k+1} # for k indexing the last iterations, capped at option memory sHistory = [None] * memory # list store differences for latest k's for the gradient at time # k+1 and the gradient at time k yHistory = [None] * memory # stores the reciprocal of the inner product between # sHistory[k] and yHistory[k] rhoHistory = [None] * memory # scaling of direction given by getDirection alpha = 1 # scaling of initial matrix, Barzilai-Borwein scaleFactor = 1 # Norm of the step stepsize = 1 # sores wether the step is accepted byt he cautious update check accepted = True # compute cost function and its gradient hCurCost, hCurGradient = self.alignment_costgrad(q2tilde) hCurGradNorm = self.norm(hCurGradient) # line-search 
        lsstats = {"costevals": 0, "stepsize": 0.0, "alpha": 0.0}
        # flag to control restarting scheme to avoid infinite loops
        ultimatum = False

        if verb >= 2:
            print(' iter                 cost val          grad. norm       alpha')

        # stats
        info = []
        stats = {"iter": k, "cost": hCurCost, "gradnorm": hCurGradNorm,
                 "stepsize": np.nan, "time": time.time() - timetic,
                 "accepted": None, "linesearch": lsstats}
        info.append(stats)

        while True:
            if verb >= 2:
                print('%5d %+.16e %.8e %.4e' % (k, hCurCost,
                                                hCurGradNorm, alpha))

            # start timing this iteration
            timetic = time.time()

            # run standard stopping criterion checks
            stop = self.stoppingcriterion(options, info, k)

            if stop == 0:
                if stats["stepsize"] < options["minstepsize"]:
                    if not ultimatum:
                        if verb >= 2:
                            print('stepsize is too small, restarting '
                                  'the bfgs procedure at the current point.')
                        j = 0
                        ultimatum = True
                    else:
                        stop = 1
                else:
                    # we are not in trouble: lift the ultimatum if it was on
                    ultimatum = False

            if stop > 0:
                break

            # compute BFGS direction
            p = self.getDirection(hCurGradient, sHistory, yHistory,
                                  rhoHistory, scaleFactor, min(j, memory))

            # execute line-search
            in_prod = self.inner(hCurGradient, p)
            stepsize, hNext, lsstats = self.linesearch_hint(p, hCurCost,
                                                            in_prod,
                                                            q2tilde, options)

            # iterative update of the optimal diffeomorphism and of q2
            # via the group action
            htilde = self.group_action_SRVF(htilde, hNext)
            q2tilde = self.group_action_SRVF(q2tilde, hNext)

            # Record the BFGS step-multiplier alpha which was effectively
            # selected. Toward convergence, we hope to see alpha = 1.
            alpha = stepsize / self.norm(p)
            step = alpha * p

            # query cost and gradient at the candidate new point
            hNextCost, hNextGradient = self.alignment_costgrad(q2tilde)

            # compute sk and yk
            sk = step
            yk = hNextGradient - hCurGradient

            # Computation of the BFGS step is invariant under scaling of
            # sk and yk by a common factor. For numerical reasons, we
            # scale sk and yk so that sk is a unit norm vector.
            norm_sk = self.norm(sk)
            sk = sk / norm_sk
            yk = yk / norm_sk

            inner_sk_yk = self.inner(sk, yk)
            inner_sk_sk = self.norm(sk) ** 2  # ensures nonnegativity

            # If the cautious step is accepted (which is the intended
            # behavior), we record sk, yk, and rhok and need to do some
            # housekeeping. If the cautious step is rejected, these are
            # not recorded. In all cases, hNext is the next iterate: the
            # notion of accept/reject here is limited to whether or not
            # we keep track of sk, yk, rhok to update the BFGS operator.
            cap = options["strict_inc_func"](hCurGradNorm)
            if inner_sk_sk != 0 and (inner_sk_yk / inner_sk_sk) >= cap:
                accepted = True

                rhok = 1 / inner_sk_yk

                scaleFactor = inner_sk_yk / self.norm(yk) ** 2

                # Time to store the vectors sk, yk and the scalar rhok.
                # If we are out of memory:
                if j >= memory:
                    # sk and yk are saved from 1 to the end with the most
                    # current recorded to the rightmost hand side of the
                    # cells that are occupied. When memory is full, do a
                    # shift so that the rightmost is earliest and replace
                    # it with the most recent sk, yk.
                    if memory > 1:
                        tmp = sHistory[1:]
                        tmp.append(sHistory[0])
                        sHistory = tmp
                        tmp = yHistory[1:]
                        tmp.append(yHistory[0])
                        yHistory = tmp
                        tmp = rhoHistory[1:]
                        tmp.append(rhoHistory[0])
                        rhoHistory = tmp
                    if memory > 0:
                        # the history lists have length memory, so the
                        # last valid index is memory-1 (indexing with
                        # memory, as written before, overruns the list)
                        sHistory[memory - 1] = sk
                        yHistory[memory - 1] = yk
                        rhoHistory[memory - 1] = rhok
                # if we are not out of memory
                else:
                    sHistory[j] = sk
                    yHistory[j] = yk
                    rhoHistory[j] = rhok

                j += 1
            # the cautious step is rejected: we do not store sk, yk, rhok
            else:
                accepted = False

            # update variables to the new iterate
            hCurGradient = hNextGradient
            hCurGradNorm = self.norm(hNextGradient)
            hCurCost = hNextCost

            # k is the number of iterations we have accomplished
            k += 1

            stats = {"iter": k, "cost": hCurCost, "gradnorm": hCurGradNorm,
                     "stepsize": np.nan, "time": time.time() - timetic,
                     "accepted": accepted, "linesearch": lsstats}
            info.append(stats)

        self.info = info[0:(k + 1)]
        self.gammaOpt = np.zeros(self.T)
        self.gammaOpt[1:] = cumtrapz(htilde ** 2, self.t)
        self.q2Opt = q2tilde
        self.cost = hCurCost

        if verb >= 1:
            # info entries are dicts, so index by key (attribute access,
            # as written before, would raise AttributeError)
            print('Total time is %f [s] (excludes statsfun)'
                  % info[-1]["time"])

        return

    def alignment_cost(self, h, q2k):
        r"""
        Evaluate the cost function :math:`f = \|q_1 - ((q_2,h_k),h)\|^2`.
        :math:`h=\sqrt{\dot{\gamma}}` is a sequential update of the
        cumulative warping hk
        """
        q2new = self.group_action_SRVF(q2k, h)
        f = self.normL2(self.q1 - q2new) ** 2
        return f

    def alignment_costgrad(self, q2k):
        r"""
        Evaluate the cost function :math:`f = \|q_1 - (q_2,h_k)\|^2` and
        the gradient g = grad f in the tangent space of identity.
        :math:`h_k=\sqrt{\dot{\gamma_k}}` is the cumulative warping of q2
        produced by an iterative sequential optimization algorithm.
        """
        t = self.t
        T = self.T
        q1 = self.q1

        # compute cost
        f = self.normL2(q1 - q2k) ** 2

        # compute cost gradient
        q2kdot = np.gradient(q2k, 1 / (T - 1))
        if q2k.ndim > 1:
            q2kdot = q2kdot[1]
        dq = q1 - q2k
        v = np.zeros(T)
        tmp = dq * q2kdot
        tmp1 = dq * q2k
        if tmp.ndim > 1:
            v[1:] = 2 * cumtrapz(tmp.sum(axis=0), t)
            v = v - tmp1.sum(axis=0)
        else:
            v[1:] = 2 * cumtrapz(tmp, t)
            v = v - tmp1

        g = v - trapz(v, t)

        return f, g

    def getDirection(self, hCurGradient, sHistory, yHistory, rhoHistory,
                     scaleFactor, j):
        """
        BFGS step; see Wen's paper for details. This function takes in a
        tangent vector g and applies an approximate inverse Hessian P to
        it to get Pg. Then, -Pg is returned. Parallel transport is not
        needed for this problem since we always work in the tangent
        space of identity.
        """
        q = hCurGradient
        inner_s_q = np.zeros(j)

        for i in range(j, 0, -1):
            inner_s_q[i - 1] = rhoHistory[i - 1] \
                * self.inner(sHistory[i - 1], q)
            q = q - inner_s_q[i - 1] * yHistory[i - 1]

        r = scaleFactor * q

        for i in range(0, j):
            omega = rhoHistory[i] * self.inner(yHistory[i], r)
            r = r + (inner_s_q[i] - omega) * sHistory[i]

        direction = -r

        return direction

    def linesearch_hint(self, d, f0, df0, q2k, options):
        """
        Armijo line-search based on the line-search hint in the problem
        structure.

        Base line-search algorithm for descent methods, based on a simple
        backtracking method. The search direction provided has to be a
        descent direction, as indicated by a negative df0 = directional
        derivative of f at the identity element along d.

        The algorithm selects a hardcoded initial step size. If that step
        does not fulfill the Armijo sufficient decrease criterion, the
        step size is reduced geometrically until a satisfactory step size
        is obtained or until a failure criterion triggers.

        Below, the step is constructed as alpha*d, and the step size is
        the norm of that vector, thus: stepsize = alpha*norm_d. The step
        is executed by computing the exponential mapping
        exp_{hid}(alpha*d), giving newh.
""" contraction_factor = .5 suff_decr = 1e-6 max_ls_steps = 25 ls_backtrack = True ls_force_decrease = True # init alpha alpha = 1 # Identity element hid = np.ones(self.T) # Make the chosen step and compute cost there newh = self.exp(hid, d, alpha) newf = self.alignment_cost(newh, q2k) cost_evaluations = 1 # backtrack while the Armijo criterion is not satisfied # or if newh goes outside positive orthant tst = newh<=0 while (ls_backtrack and ((newf > (f0 + suff_decr*alpha*df0)) or (tst.sum()>0))): # reduce the step size alpha *= contraction_factor # look closer down the line newh = self.exp(hid, d, alpha) newf = self.alignment_cost(newh, q2k) cost_evaluations += 1 tst = newh<=0 # make sure we don't run out of budget if cost_evaluations >= max_ls_steps: break # if we got here with obtaining a derease, reject the step if ls_force_decrease and newf > f0: alpha = 0 newh = hid newf = f0 # As seen outside this function, stepsize is the size of the vector we # retract to make the step from h to newh. Since the step is alpha*d: norm_d = self.norm(d) stepsize = alpha * norm_d # return some statistics lsstats = {"costevals":cost_evaluations,"stepsize":stepsize,"alpha":alpha} return stepsize, newh, lsstats def stoppingcriterion(self, options, info, last): stop = 0 stats = info[last] if stats['gradnorm'] <= options["tolgradnorm"]: stop = 2 if stats['time'] >= options["maxtime"]: stop = 3 if stats['iter'] >= options["maxiter"]: stop = 4 return stop def group_action_SRVF(self, q, h): p = q.shape[0] gamma = np.zeros(self.T) gamma[1:] = cumtrapz(h**2,self.t) gamma = gamma / gamma[-1] h = np.sqrt(np.gradient(gamma,self.t)) qnew =
        qnew = np.zeros(q.shape)
        # apply the group action (q, gamma) = (q o gamma) * sqrt(gamma-dot):
        # interpolate q at gamma and rescale by h = sqrt(gamma-dot)
        if q.ndim > 1:
            for i in range(0, p):
                qnew[i, :] = np.interp(gamma, self.t, q[i, :]) * h
        else:
            qnew = np.interp(gamma, self.t, q) * h

        return qnew
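    # ------------------------------------------------------------------
    # The solver above calls the helper methods inner, norm, normL2, and
    # exp, which are not shown in this excerpt. The sketches below are
    # assumptions filled in for completeness, not the author's verbatim
    # code: they use the L2([0,1]) inner product computed with the
    # trapezoidal rule, and the great-circle exponential map on the unit
    # hypersphere, which is the geometry the surrounding math assumes.
    def inner(self, v1, v2):
        # L2 inner product of two tangent vectors on [0,1]
        return trapz(v1 * v2, self.t)

    def norm(self, v):
        # norm induced by the L2 inner product
        return np.sqrt(self.inner(v, v))

    def normL2(self, f):
        # L2 norm of a function (sums across dimensions if f is (M,N))
        if f.ndim > 1:
            return np.sqrt(trapz(np.sum(f ** 2, axis=0), self.t))
        return np.sqrt(trapz(f ** 2, self.t))

    def exp(self, f1, v, delta=1):
        # exponential map on the unit hypersphere in L2: shoot a
        # geodesic from f1 along the tangent vector delta*v
        # (reduces to f1 when the tangent vector vanishes)
        vd = delta * v
        nrm_vd = self.norm(vd)
        if nrm_vd > 0:
            return np.cos(nrm_vd) * f1 + np.sin(nrm_vd) * (vd / nrm_vd)
        return f1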
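# A minimal usage sketch, assuming only the rlbfgs API defined above;
# the toy functions f1/f2 and the srvf construction are illustrative
# and not part of the original module.
if __name__ == "__main__":
    T = 101
    t = np.linspace(0, 1, T)

    # two toy functions, one a warped version of the other
    f1 = np.sin(2 * np.pi * t)
    f2 = np.sin(2 * np.pi * t ** 1.5)

    # square root velocity functions: q = f-dot / sqrt(|f-dot|)
    eps = np.finfo(np.double).eps
    f1dot = np.gradient(f1, t)
    f2dot = np.gradient(f2, t)
    q1 = f1dot / np.sqrt(np.abs(f1dot) + eps)
    q2 = f2dot / np.sqrt(np.abs(f2dot) + eps)

    # align q2 to q1 and report the optimum
    obj = rlbfgs(q1, q2, t)
    obj.solve(maxiter=30, verb=1)
    print('final cost: %f' % obj.cost)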