from __future__ import absolute_import

import logging
from copy import copy

import numpy as np
import scipy.ndimage as nd

from .reference import generate_reference
from .reference_q4 import generate_reference_Q4, find_elm_borders_mesh, normalized_zero_mean
from ..IO.image_stack import ImageStack
from ..elements.b_splines import BSplineSurface
from ..elements.q4 import Q4
from ..mesh.meshUtilities import Mesh
from ..utils import convert_to_img_frame, find_element_borders


def correlate_img_to_ref_spline(node_pos, img, ref, settings):
    """
    Correlate an image to a reference.

    The routine identifies the part of the image covered by the mesh and
    performs image correlation on this part of the image.

    Parameters
    ----------
    node_pos : ndarray
        The positions of the nodes
    img : ndarray
        2d array containing the image
    ref : Reference
        The reference object
    settings : DICInput
        The settings which will be used during the analysis

    Returns
    -------
    updated node positions, current pixel values, convergence flag

    Notes
    -----
    The function extracts a rectangular region of the image covered by the
    element, which may be very large if the mesh is tilted. This can reduce
    the performance of the routine.
    """
    element_borders = find_element_borders(node_pos, settings.mesh)

    image_frame, node_pos_img_coords = convert_to_img_frame(img, node_pos, settings.mesh,
                                                            element_borders, settings)

    node_position_increment, Ic, conv = correlate_frames(node_pos_img_coords, settings.mesh,
                                                         image_frame, ref, settings)

    node_position_new = node_pos + node_position_increment

    return node_position_new, Ic, conv


def correlate_frames(node_pos, mesh, img, ref, settings):
    """
    Correlate an image frame to the reference.

    Parameters
    ----------
    node_pos : ndarray
        The positions of the nodes
    mesh : Mesh
        The mesh object
    img : ndarray
        2d array containing the image frame
    ref : Reference
        The reference object
    settings : DICInput
        The settings which will be used during the analysis

    Returns
    -------
    node position increments, current pixel values, convergence flag
    """
    logger = logging.getLogger(__name__)

    node_pos = np.copy(node_pos).astype(settings.precision)

    # Declare empty arrays
    pixel_pos = np.zeros((2, ref.n_pixels), dtype=settings.precision)
    dnod_x = np.zeros(mesh.n_nodes * 2)

    image_filtered = nd.spline_filter(img, order=settings.interpolation_order).transpose()

    for it in range(settings.maxit):
        # Find nodal positions within the ROI
        np.dot(node_pos, ref.Nref_stack, out=pixel_pos)

        # Find pixel values for the current coordinates
        Ic = nd.map_coordinates(image_filtered, pixel_pos,
                                order=settings.interpolation_order, prefilter=False)

        # Calculate the position increment as the least-squares solution
        # dnod = (B^T B)^-1 B^T dIk
        dnod = np.dot(ref.K, ref.I0_stack - Ic)

        # Add the increment to the nodal positions
        node_pos[0, :] += dnod[:mesh.n_nodes]
        node_pos[1, :] += dnod[mesh.n_nodes:]
        dnod_x += dnod

        # Check for convergence
        if np.max(np.abs(dnod)) < settings.tol:
            logger.info('Frame converged in %s iterations', it)
            return np.array((dnod_x[:mesh.n_nodes], dnod_x[mesh.n_nodes:])), Ic, True

    # Non-convergence fallback: return the accumulated increment with
    # conv=False so the caller's three-value unpacking always succeeds.
    logger.info('Frame did not converge within %s iterations', settings.maxit)
    return np.array((dnod_x[:mesh.n_nodes], dnod_x[mesh.n_nodes:])), Ic, False
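
# A minimal sketch (not muDIC's actual code) of the least-squares update that
# the precomputed operator ref.K applies above. With B a hypothetical
# (n_pixels x 2*n_nodes) matrix of image gradients weighted by the element
# shape functions, K = (B^T B)^-1 B^T maps the pixel residual dI = I0 - Ic to
# a nodal increment, exactly as in `dnod = np.dot(ref.K, ref.I0_stack - Ic)`.
def _demo_least_squares_increment(n_pixels=100, n_dofs=8, seed=0):
    rng = np.random.default_rng(seed)
    B = rng.standard_normal((n_pixels, n_dofs))
    K = np.linalg.solve(B.T @ B, B.T)   # (B^T B)^-1 B^T, shape (n_dofs, n_pixels)
    dI = rng.standard_normal(n_pixels)  # residual between reference and current pixels
    dnod = K @ dI                       # nodal position increment
    return dnod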
# MIT License
#
# Copyright (c) 2020 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the python API for tsdate.
"""
import unittest
import collections
import json
import warnings
import math

import numpy as np
import scipy.special
import scipy.stats

import msprime
import tsinfer
import tskit

import tsdate
from tsdate.base import NodeGridValues
from tsdate.prior import (SpansBySamples, PriorParams,
                          ConditionalCoalescentTimes, fill_priors, gamma_approx)
from tsdate.date import (Likelihoods, LogLikelihoods, LogLikelihoodsStreaming,
                         InOutAlgorithms, posterior_mean_var,
                         constrain_ages_topo, get_dates, date)
from tsdate.util import nodes_time

import utility_functions


class TestBasicFunctions(unittest.TestCase):
    """
    Test for some of the basic functions used in tsdate
    """

    def test_alpha_prob(self):
        self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 3), 1.)
        self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 4), 0.25)

    def test_tau_expect(self):
        self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 10), 1.8)
        self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 100), 0.09)
        self.assertEqual(ConditionalCoalescentTimes.tau_expect(100, 100), 1.98)
        self.assertEqual(ConditionalCoalescentTimes.tau_expect(5, 10), 0.4)

    def test_tau_squared_conditional(self):
        self.assertAlmostEqual(
            ConditionalCoalescentTimes.tau_squared_conditional(1, 10), 4.3981418)
        self.assertAlmostEqual(
            ConditionalCoalescentTimes.tau_squared_conditional(100, 100),
            -4.87890977e-18)

    def test_tau_var(self):
        self.assertEqual(
            ConditionalCoalescentTimes.tau_var(2, 2), 1)
        self.assertAlmostEqual(
            ConditionalCoalescentTimes.tau_var(10, 20), 0.0922995960)
        self.assertAlmostEqual(
            ConditionalCoalescentTimes.tau_var(50, 50), 1.15946186)

    def test_gamma_approx(self):
        self.assertEqual(gamma_approx(2, 1), (4., 2.))
        self.assertEqual(gamma_approx(0.5, 0.1), (2.5, 5.0))
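
# A minimal illustration (not part of the original suite) of the moment
# matching behind gamma_approx: a gamma distribution with shape alpha and
# rate beta has mean alpha/beta and variance alpha/beta**2, so matching a
# target mean and variance gives alpha = mean**2/var and beta = mean/var.
def _demo_gamma_moment_matching(mean=2.0, var=1.0):
    alpha = mean ** 2 / var   # shape parameter
    beta = mean / var         # rate parameter
    # Reproduces the values asserted in test_gamma_approx above:
    # gamma_approx(2, 1) == (4., 2.) and gamma_approx(0.5, 0.1) == (2.5, 5.0)
    return alpha, beta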
class TestNodeTipWeights(unittest.TestCase):
    def verify_weights(self, ts):
        span_data = SpansBySamples(ts)
        # Check that all non-sample nodes in a tree are represented
        nonsample_nodes = collections.defaultdict(float)
        for tree in ts.trees():
            for n in tree.nodes():
                if not tree.is_sample(n):
                    # Do not count the span of a node where it has no sample descendants
                    nonsample_nodes[n] += (tree.span if tree.num_samples(n) > 0 else 0)
        self.assertEqual(set(span_data.nodes_to_date), set(nonsample_nodes.keys()))
        for id, span in nonsample_nodes.items():
            self.assertAlmostEqual(span, span_data.node_spans[id])
        for focal_node in span_data.nodes_to_date:
            wt = 0
            for _, weights in span_data.get_weights(focal_node).items():
                self.assertTrue(0 <= focal_node < ts.num_nodes)
                wt += np.sum(weights['weight'])
                self.assertLessEqual(max(weights['descendant_tips']), ts.num_samples)
            if not np.isnan(wt):
                # Dangling nodes will have wt=nan
                self.assertAlmostEqual(wt, 1.0)
        return span_data

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        span_data = self.verify_weights(ts)
        # With a single tree there should only be one weight
        for node in span_data.nodes_to_date:
            self.assertEqual(len(span_data.get_weights(node)), 1)
        self.assertTrue(2 in span_data.get_weights(2)[ts.num_samples]['descendant_tips'])

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        n = ts.num_samples
        span_data = self.verify_weights(ts)
        # With a single tree there should only be one weight
        for node in span_data.nodes_to_date:
            self.assertEqual(len(span_data.get_weights(node)), 1)
        for nd, expd_tips in [
                (4, 3),   # Node 4 (root) expected to have 3 descendant tips
                (3, 2)]:  # Node 3 (1st internal node) expected to have 2 descendant tips
            self.assertTrue(
                np.isin(span_data.get_weights(nd)[n]['descendant_tips'], expd_tips))

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        n = ts.num_samples
        span_data = self.verify_weights(ts)
        # With a single tree there should only be one weight
        for node in span_data.nodes_to_date:
            self.assertEqual(len(span_data.get_weights(node)), 1)
        for nd, expd_tips in [
                (6, 4),   # Node 6 (root) expected to have 4 descendant tips
                (5, 3),   # Node 5 (1st internal node) expected to have 3 descendant tips
                (4, 2)]:  # Node 4 (2nd internal node) expected to have 2 descendant tips
            self.assertTrue(
                np.isin(span_data.get_weights(nd)[n]['descendant_tips'], expd_tips))

    def test_two_trees(self):
        ts = utility_functions.two_tree_ts()
        n = ts.num_samples
        span_data = self.verify_weights(ts)
        self.assertEqual(span_data.lookup_weight(5, n, 3), 1.0)  # Root on R tree
        self.assertEqual(span_data.lookup_weight(4, n, 3), 0.2)  # Root on L tree ...
        # ... but internal node on R tree
        self.assertEqual(span_data.lookup_weight(4, n, 2), 0.8)
        self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0)  # Internal nd on L tree

    def test_missing_tree(self):
        ts = utility_functions.two_tree_ts().keep_intervals(
            [(0, 0.2)], simplify=False)
        n = ts.num_samples
        # Here we have no reference in the trees to node 5
        with self.assertLogs(level="WARNING") as log:
            SpansBySamples(ts)
            self.assertGreater(len(log.output), 0)
            self.assertIn("5", log.output[-1])         # Should mention the node number
            self.assertIn("simplify", log.output[-1])  # Should advise to simplify
        ts = ts.simplify()
        span_data = self.verify_weights(ts)
        # Root on (deleted) R tree is missing
        self.assertTrue(5 not in span_data.nodes_to_date)
        self.assertEqual(span_data.lookup_weight(4, n, 3), 1.0)  # Root on L tree ...
        # ... but internal on (deleted) R tree
        self.assertFalse(np.isin(span_data.get_weights(4)[n]['descendant_tips'], 2))
        self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0)  # Internal nd on L tree

    def test_tree_with_unary_nodes(self):
        ts = utility_functions.single_tree_ts_with_unary()
        n = ts.num_samples
        span_data = self.verify_weights(ts)
        self.assertEqual(span_data.lookup_weight(7, n, 3), 1.0)
        self.assertEqual(span_data.lookup_weight(6, n, 1), 0.5)
        self.assertEqual(span_data.lookup_weight(6, n, 3), 0.5)
        self.assertEqual(span_data.lookup_weight(5, n, 2), 0.5)
        self.assertEqual(span_data.lookup_weight(5, n, 3), 0.5)
        self.assertEqual(span_data.lookup_weight(4, n, 2), 0.75)
        self.assertEqual(span_data.lookup_weight(4, n, 3), 0.25)
        self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0)

    @unittest.skip("Unary node is internal then the oldest node")
    def test_tree_with_unary_nodes_oldest(self):
        ts = utility_functions.two_tree_ts_with_unary_n3()
        n = ts.num_samples
        span_data = self.verify_weights(ts)
        self.assertEqual(span_data.lookup_weight(9, n, 4), 0.5)
        self.assertEqual(span_data.lookup_weight(8, n, 4), 1.0)
        self.assertEqual(span_data.lookup_weight(7, n, 1), 0.5)
        self.assertEqual(span_data.lookup_weight(7, n, 4), 0.5)
        self.assertEqual(span_data.lookup_weight(6, n, 2), 0.5)
        self.assertEqual(span_data.lookup_weight(6, n, 4), 0.5)
        self.assertEqual(span_data.lookup_weight(5, n, 2), 0.5)
        self.assertEqual(span_data.lookup_weight(4, n, 2), 1.0)

    def test_polytomy_tree(self):
        ts = utility_functions.polytomy_tree_ts()
        span_data = self.verify_weights(ts)
        self.assertEqual(span_data.lookup_weight(3, ts.num_samples, 3), 1.0)

    def test_larger_find_node_tip_weights(self):
        ts = msprime.simulate(10, recombination_rate=5,
                              mutation_rate=5, random_seed=123)
        self.assertGreater(ts.num_trees, 1)
        self.verify_weights(ts)

    def test_dangling_nodes_warn(self):
        ts = utility_functions.single_tree_ts_n2_dangling()
        with self.assertLogs(level="WARNING") as log:
            self.verify_weights(ts)
            self.assertGreater(len(log.output), 0)
            self.assertIn("dangling", log.output[0])

    def test_single_tree_n2_delete_intervals(self):
        ts = utility_functions.single_tree_ts_n2()
        deleted_interval_ts = ts.delete_intervals([[0.5, 0.6]])
        n = deleted_interval_ts.num_samples
        span_data = self.verify_weights(ts)
        span_data_deleted = self.verify_weights(deleted_interval_ts)
        self.assertEqual(span_data.lookup_weight(2, n, 2),
                         span_data_deleted.lookup_weight(2, n, 2))

    def test_single_tree_n4_delete_intervals(self):
        ts = utility_functions.single_tree_ts_n4()
        deleted_interval_ts = ts.delete_intervals([[0.5, 0.6]])
        n = deleted_interval_ts.num_samples
        span_data = self.verify_weights(ts)
        span_data_deleted = self.verify_weights(deleted_interval_ts)
        self.assertEqual(span_data.lookup_weight(4, n, 2),
                         span_data_deleted.lookup_weight(4, n, 2))
        self.assertEqual(span_data.lookup_weight(5, n, 3),
                         span_data_deleted.lookup_weight(5, n, 3))
        self.assertEqual(span_data.lookup_weight(6, n, 4),
                         span_data_deleted.lookup_weight(6, n, 4))

    def test_two_tree_ts_delete_intervals(self):
        ts = utility_functions.two_tree_ts()
        deleted_interval_ts = ts.delete_intervals([[0.5, 0.6]])
        n = deleted_interval_ts.num_samples
        span_data = self.verify_weights(ts)
        span_data_deleted = self.verify_weights(deleted_interval_ts)
        self.assertEqual(span_data.lookup_weight(3, n, 2),
                         span_data_deleted.lookup_weight(3, n, 2))
        self.assertAlmostEqual(
            span_data_deleted.lookup_weight(4, n, 2)[0], 0.7 / 0.9)
        self.assertAlmostEqual(
            span_data_deleted.lookup_weight(4, n, 3)[0], 0.2 / 0.9)
        self.assertEqual(span_data.lookup_weight(5, n, 3),
                         span_data_deleted.lookup_weight(3, n, 2))

    @unittest.skip("YAN to fix")
    def test_truncated_nodes(self):
        Ne = 1e2
        ts = msprime.simulate(
            10, Ne=Ne, length=400, recombination_rate=1e-4, random_seed=12)
        truncated_ts = utility_functions.truncate_ts_samples(
            ts, average_span=200, random_seed=123)
        span_data = self.verify_weights(truncated_ts)
        raise NotImplementedError(str(span_data))


class TestMakePrior(unittest.TestCase):
    # We only test make_prior() on single trees
    def verify_priors(self, ts, prior_distr):
        # Check that the prior contains all possible tip counts
        priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
        priors.add(ts.num_samples)
        priors_df = priors[ts.num_samples]
        self.assertEqual(priors_df.shape[0], ts.num_samples + 1)
        return priors_df

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=1., beta=1., mean=1., var=1.)))
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[2],
            PriorParams(alpha=-0.34657359, beta=0.69314718, mean=1., var=1.)))

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        prior2mv = {'mean': 1 / 3, 'var': 1 / 9}
        prior3mv = {'mean': 1 + 1 / 3, 'var': 1 + 1 / 9}
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=-1.44518588, beta=0.69314718, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=0.04492816, beta=0.48550782, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        self.skipTest("Fill in values instead of np.nan")
        prior2mv = {'mean': np.nan, 'var': np.nan}
        prior3mv = {'mean': np.nan, 'var': np.nan}
        prior4mv = {'mean': np.nan, 'var': np.nan}
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        self.assertTrue(np.allclose(
            priors[4], PriorParams(alpha=np.nan, beta=np.nan, **prior4mv)))
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        self.assertTrue(np.allclose(
            priors[4], PriorParams(alpha=np.nan, beta=np.nan, **prior4mv)))

    def test_polytomy_tree(self):
        ts = utility_functions.polytomy_tree_ts()
        self.skipTest("Fill in values instead of np.nan")
        prior3mv = {'mean': np.nan, 'var': np.nan}
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))

    def test_two_tree_ts(self):
        ts = utility_functions.two_tree_ts()
        self.skipTest("Fill in values instead of np.nan")
        prior2mv = {'mean': np.nan, 'var': np.nan}
        prior3mv = {'mean': np.nan, 'var': np.nan}
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))

    def test_single_tree_ts_with_unary(self):
        ts = utility_functions.single_tree_ts_with_unary()
        self.skipTest("Fill in values instead of np.nan")
        prior2mv = {'mean': np.nan, 'var': np.nan}
        prior3mv = {'mean': np.nan, 'var': np.nan}
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))

    def test_two_tree_mutation_ts(self):
        ts = utility_functions.two_tree_mutation_ts()
        self.skipTest("Fill in values instead of np.nan")
        prior2mv = {'mean': np.nan, 'var': np.nan}
        prior3mv = {'mean': np.nan, 'var': np.nan}
        priors = self.verify_priors(ts, 'lognorm')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
        self.assertTrue(np.allclose(
            priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
        self.assertTrue(np.allclose(
            priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))


class TestMixturePrior(unittest.TestCase):
    alpha_beta = [PriorParams.field_index('alpha'), PriorParams.field_index('beta')]

    def get_mixture_prior_params(self, ts, prior_distr):
        span_data = SpansBySamples(ts)
        priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
        priors.add(ts.num_samples, approximate=False)
        mixture_priors = priors.get_mixture_prior_params(span_data)
        return mixture_priors

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        self.assertTrue(
            np.allclose(mixture_priors[2, self.alpha_beta], [1., 1.]))
        mixture_priors = self.get_mixture_prior_params(ts, 'lognorm')
        self.assertTrue(
            np.allclose(mixture_priors[2, self.alpha_beta],
                        [-0.34657359, 0.69314718]))

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        self.assertTrue(
            np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
        self.assertTrue(
            np.allclose(mixture_priors[4, self.alpha_beta], [1.6, 1.2]))
        mixture_priors = self.get_mixture_prior_params(ts, 'lognorm')
        self.assertTrue(
            np.allclose(mixture_priors[3, self.alpha_beta],
                        [-1.44518588, 0.69314718]))
        self.assertTrue(
            np.allclose(mixture_priors[4, self.alpha_beta],
                        [0.04492816, 0.48550782]))

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        self.assertTrue(
            np.allclose(mixture_priors[4, self.alpha_beta], [0.81818182, 3.27272727]))
        self.assertTrue(
            np.allclose(mixture_priors[5, self.alpha_beta], [1.8, 3.6]))
        self.assertTrue(
            np.allclose(mixture_priors[6, self.alpha_beta], [1.97560976, 1.31707317]))

    def test_polytomy_tree(self):
        ts = utility_functions.polytomy_tree_ts()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        self.assertTrue(
            np.allclose(mixture_priors[3, self.alpha_beta], [1.6, 1.2]))

    def test_two_trees(self):
        ts = utility_functions.two_tree_ts()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        self.assertTrue(
            np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
        # Node 4 should be a mixture between 2 and 3 tips
        self.assertTrue(
            np.allclose(mixture_priors[4, self.alpha_beta], [0.60377, 1.13207]))
        self.assertTrue(
            np.allclose(mixture_priors[5, self.alpha_beta], [1.6, 1.2]))

    def test_single_tree_ts_with_unary(self):
        ts = utility_functions.single_tree_ts_with_unary()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        # Root is a 3 tip prior
        self.assertTrue(
            np.allclose(mixture_priors[7, self.alpha_beta], [1.6, 1.2]))
        # Node 6 should be a 50:50 mixture between 1 and 3 tips
        self.assertTrue(
            np.allclose(mixture_priors[6, self.alpha_beta], [0.44444, 0.66666]))
        # Node 5 should be a 50:50 mixture of 2 and 3 tips
        self.assertTrue(
            np.allclose(mixture_priors[5, self.alpha_beta], [0.80645, 0.96774]))
        # Node 4 should be a 75:25 mixture of 2 and 3 tips
        self.assertTrue(
            np.allclose(mixture_priors[4, self.alpha_beta], [0.62025, 1.06329]))
        # Node 3 is a 2 tip prior
        self.assertTrue(
            np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))

    def test_two_tree_mutation_ts(self):
        ts = utility_functions.two_tree_mutation_ts()
        mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
        self.assertTrue(
            np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
        # Node 4 should be a mixture between 2 and 3 tips
        self.assertTrue(
            np.allclose(mixture_priors[4, self.alpha_beta], [0.60377, 1.13207]))
        self.assertTrue(
            np.allclose(mixture_priors[5, self.alpha_beta], [1.6, 1.2]))

    def check_intervals(self, ts, delete_interval_ts, keep_interval_ts):
        tests = list()
        for distr in ['gamma', 'lognorm']:
            mix_priors = self.get_mixture_prior_params(ts, distr)
            for interval_ts in [delete_interval_ts, keep_interval_ts]:
                mix_priors_ints = self.get_mixture_prior_params(interval_ts, distr)
                for internal_node in range(ts.num_samples, ts.num_nodes):
                    tests.append(np.allclose(
                        mix_priors[internal_node, self.alpha_beta],
                        mix_priors_ints[internal_node, self.alpha_beta]))
        return tests

    def test_one_tree_n2_intervals(self):
        ts = utility_functions.single_tree_ts_n2()
        delete_interval_ts = ts.delete_intervals([[0.5, 0.6]])
        keep_interval_ts = ts.keep_intervals([[0, 0.1]])
        tests = self.check_intervals(ts, delete_interval_ts, keep_interval_ts)
        self.assertTrue(np.all(tests))

    def test_two_tree_mutation_ts_intervals(self):
        ts = utility_functions.two_tree_mutation_ts()
        ts_extra_length = utility_functions.two_tree_ts_extra_length()
        delete_interval_ts = ts_extra_length.delete_intervals([[0.75, 1.25]])
        keep_interval_ts = ts_extra_length.keep_intervals([[0, 1.]])
        tests = self.check_intervals(ts, delete_interval_ts, keep_interval_ts)
        self.assertTrue(np.all(tests))
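
# A worked check (not part of the original suite) of the mixture values
# asserted for node 4 in test_two_trees above. Node 4 covers 80% of the
# sequence as a 2-tip ancestor (mean 1/3, var 1/9) and 20% as a 3-tip root
# (mean 4/3, var 10/9); the mixture moments are then gamma moment-matched.
def _demo_node4_mixture_prior():
    w2, w3 = 0.8, 0.2
    mean2, var2 = 1 / 3, 1 / 9
    mean3, var3 = 4 / 3, 10 / 9
    mean = w2 * mean2 + w3 * mean3
    second_moment = w2 * (var2 + mean2 ** 2) + w3 * (var3 + mean3 ** 2)
    var = second_moment - mean ** 2
    alpha, beta = mean ** 2 / var, mean / var
    return alpha, beta  # approx (0.60377, 1.13207), as asserted above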
class TestPriorVals(unittest.TestCase):
    def verify_prior_vals(self, ts, prior_distr):
        span_data = SpansBySamples(ts)
        priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
        priors.add(ts.num_samples, approximate=False)
        grid = np.linspace(0, 3, 3)
        mixture_priors = priors.get_mixture_prior_params(span_data)
        prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
        return prior_vals

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[2], [0, 1, 0.22313016]))

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.011109]))
        self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.3973851]))

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.00467134]))
        self.assertTrue(np.allclose(prior_vals[5], [0, 1, 0.02167806]))
        self.assertTrue(np.allclose(prior_vals[6], [0, 1, 0.52637529]))

    def test_polytomy_tree(self):
        ts = utility_functions.polytomy_tree_ts()
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.3973851]))

    def test_two_tree_ts(self):
        ts = utility_functions.two_tree_ts()
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.011109]))
        self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.080002]))
        self.assertTrue(np.allclose(prior_vals[5], [0, 1, 0.3973851]))

    def test_tree_with_unary_nodes(self):
        ts = utility_functions.single_tree_ts_with_unary()
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[7], [0, 1, 0.397385]))
        self.assertTrue(np.allclose(prior_vals[6], [0, 1, 0.113122]))
        self.assertTrue(np.allclose(prior_vals[5], [0, 1, 0.164433]))
        self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.093389]))
        self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.011109]))

    def test_one_tree_n2_intervals(self):
        ts = utility_functions.single_tree_ts_n2()
        delete_interval_ts = ts.delete_intervals([[0.1, 0.3]])
        keep_interval_ts = ts.keep_intervals([[0.4, 0.6]])
        prior_vals = self.verify_prior_vals(ts, 'gamma')
        prior_vals_keep = self.verify_prior_vals(keep_interval_ts, 'gamma')
        prior_vals_delete = self.verify_prior_vals(delete_interval_ts, 'gamma')
        self.assertTrue(np.allclose(prior_vals[2], prior_vals_keep[2]))
        self.assertTrue(np.allclose(prior_vals[2], prior_vals_delete[2]))


class TestLikelihoodClass(unittest.TestCase):
    def poisson(self, param, x, normalize=True):
        ll = np.exp(-param) * param ** x / scipy.special.factorial(x)
        if normalize:
            return ll / np.max(ll)
        else:
            return ll

    def log_poisson(self, param, x, normalize=True):
        with np.errstate(divide='ignore'):
            ll = np.log(np.exp(-param) * param ** x / scipy.special.factorial(x))
        if normalize:
            return ll - np.max(ll)
        else:
            return ll

    def test_get_mut_edges(self):
        ts = utility_functions.two_tree_mutation_ts()
        mutations_per_edge = Likelihoods.get_mut_edges(ts)
        for e in ts.edges():
            if e.child == 3 and e.parent == 4:
                self.assertEqual(mutations_per_edge[e.id], 2)
            elif e.child == 0 and e.parent == 5:
                self.assertEqual(mutations_per_edge[e.id], 1)
            else:
                self.assertEqual(mutations_per_edge[e.id], 0)

    def test_create_class(self):
        ts = utility_functions.two_tree_mutation_ts()
        grid = np.array([0, 1, 2])
        lik = Likelihoods(ts, grid)
        loglik = LogLikelihoods(ts, grid)
        self.assertRaises(AssertionError, lik.get_mut_lik_fixed_node, ts.edge(0))
        self.assertRaises(AssertionError, lik.get_mut_lik_lower_tri, ts.edge(0))
        self.assertRaises(AssertionError, lik.get_mut_lik_upper_tri, ts.edge(0))
        self.assertRaises(AssertionError, loglik.get_mut_lik_fixed_node, ts.edge(0))
        self.assertRaises(AssertionError, loglik.get_mut_lik_lower_tri, ts.edge(0))
        self.assertRaises(AssertionError, loglik.get_mut_lik_upper_tri, ts.edge(0))

    def test_no_theta_class(self):
        ts = utility_functions.two_tree_mutation_ts()
        grid = np.array([0, 1, 2])
        lik = Likelihoods(ts, grid, theta=None)
        self.assertRaises(RuntimeError, lik.precalculate_mutation_likelihoods)

    def test_precalc_lik_lower(self):
        ts = utility_functions.single_tree_ts_n3()
        grid = np.array([0, 1, 2])
        eps = 0
        theta = 1
        lik = Likelihoods(ts, grid, theta, eps)
        for method in (0, 1, 2):
            # TODO: Remove this loop and hard-code one of the methods after perf testing
            lik.precalculate_mutation_likelihoods(unique_method=method)
            self.assertEquals(ts.num_trees, 1)
            span = ts.first().span
            dt = grid
            num_muts = 0
            n_internal_edges = 0
            expected_lik_dt = self.poisson(dt * (theta / 2 * span), num_muts)
            for edge in ts.edges():
                if ts.node(edge.child).is_sample():
                    self.assertRaises(AssertionError, lik.get_mut_lik_lower_tri, edge)
                    self.assertRaises(AssertionError, lik.get_mut_lik_upper_tri, edge)
                    fixed_edge_lik = lik.get_mut_lik_fixed_node(edge)
                    self.assertTrue(np.allclose(fixed_edge_lik, expected_lik_dt))
                else:
                    n_internal_edges += 1  # only one internal edge in this tree
                    self.assertLessEqual(n_internal_edges, 1)
                    self.assertRaises(AssertionError, lik.get_mut_lik_fixed_node, edge)
                    lower_tri = lik.get_mut_lik_lower_tri(edge)
                    self.assertAlmostEqual(lower_tri[0], expected_lik_dt[0])
                    self.assertAlmostEqual(lower_tri[1], expected_lik_dt[1])
                    self.assertAlmostEqual(lower_tri[2], expected_lik_dt[0])
                    self.assertAlmostEqual(lower_tri[3], expected_lik_dt[2])
                    self.assertAlmostEqual(lower_tri[4], expected_lik_dt[1])
                    self.assertAlmostEqual(lower_tri[5], expected_lik_dt[0])

    def test_precalc_lik_upper_multithread(self):
        ts = utility_functions.two_tree_mutation_ts()
        grid = np.array([0, 1, 2])
        eps = 0
        theta = 1
        for L, pois in [(Likelihoods, self.poisson), (LogLikelihoods, self.log_poisson)]:
            for normalize in (True, False):
                lik = L(ts, grid, theta, eps, normalize=normalize)
                dt = grid
                for num_threads in (None, 1, 2):
                    n_internal_edges = 0
                    lik.precalculate_mutation_likelihoods(num_threads=num_threads)
                    for edge in ts.edges():
                        if not ts.node(edge.child).is_sample():
                            n_internal_edges += 1  # only two internal edges in this tree
                            self.assertLessEqual(n_internal_edges, 2)
                            if edge.parent == 4 and edge.child == 3:
                                num_muts = 2
                            elif edge.parent == 5 and edge.child == 4:
                                num_muts = 0
                            else:
                                self.fail("Unexpected edge")
                            span = edge.right - edge.left
                            expected_lik_dt = pois(
                                dt * (theta / 2 * span), num_muts,
                                normalize=normalize)
                            upper_tri = lik.get_mut_lik_upper_tri(edge)
                            self.assertAlmostEqual(upper_tri[0], expected_lik_dt[0])
                            self.assertAlmostEqual(upper_tri[1], expected_lik_dt[1])
                            self.assertAlmostEqual(upper_tri[2], expected_lik_dt[2])
                            self.assertAlmostEqual(upper_tri[3], expected_lik_dt[0])
                            self.assertAlmostEqual(upper_tri[4], expected_lik_dt[1])
                            self.assertAlmostEqual(upper_tri[5], expected_lik_dt[0])

    def test_tri_functions(self):
        ts = utility_functions.two_tree_mutation_ts()
        grid = np.array([0, 1, 2])
        eps = 0
        theta = 1
        lik = Likelihoods(ts, grid, theta, eps)
        lik.precalculate_mutation_likelihoods()
        for e in ts.edges():
            if e.child == 3 and e.parent == 4:
                exp_branch_muts = 2
                exp_span = 0.2
                self.assertEqual(e.right - e.left, exp_span)
                self.assertEqual(lik.mut_edges[e.id], exp_branch_muts)
                pois_lambda = grid * theta / 2 * exp_span
                cumul_pois = np.cumsum(self.poisson(pois_lambda, exp_branch_muts))
                lower_tri = lik.get_mut_lik_lower_tri(e)
                self.assertTrue(
                    np.allclose(lik.rowsum_lower_tri(lower_tri), cumul_pois))
                upper_tri = lik.get_mut_lik_upper_tri(e)
                self.assertTrue(
                    np.allclose(
                        lik.rowsum_upper_tri(upper_tri)[::-1], cumul_pois))

    def test_no_theta_class_loglikelihood(self):
        ts = utility_functions.two_tree_mutation_ts()
        grid = np.array([0, 1, 2])
        lik = LogLikelihoods(ts, grid, theta=None)
        self.assertRaises(RuntimeError, lik.precalculate_mutation_likelihoods)

    def test_logsumexp(self):
        lls = np.array([0.1, 0.2, 0.5])
        ll_sum = np.sum(lls)
        log_lls = np.log(lls)
        self.assertEqual(LogLikelihoods.logsumexp(log_lls), np.log(ll_sum))

    def test_log_tri_functions(self):
        ts = utility_functions.two_tree_mutation_ts()
        grid = np.array([0, 1, 2])
        eps = 0
        theta = 1
        lik = Likelihoods(ts, grid, theta, eps)
        loglik = LogLikelihoods(ts, grid, theta=theta, eps=eps)
        lik.precalculate_mutation_likelihoods()
        loglik.precalculate_mutation_likelihoods()
        for e in ts.edges():
            if e.child == 3 and e.parent == 4:
                exp_branch_muts = 2
                exp_span = 0.2
                self.assertEqual(e.right - e.left, exp_span)
                self.assertEqual(lik.mut_edges[e.id], exp_branch_muts)
                self.assertEqual(loglik.mut_edges[e.id], exp_branch_muts)
                pois_lambda = grid * theta / 2 * exp_span
                cumul_pois = np.cumsum(self.poisson(pois_lambda, exp_branch_muts))
                lower_tri = lik.get_mut_lik_lower_tri(e)
                lower_tri_log = loglik.get_mut_lik_lower_tri(e)
                self.assertTrue(
                    np.allclose(lik.rowsum_lower_tri(lower_tri), cumul_pois))
                with np.errstate(divide='ignore'):
                    self.assertTrue(
                        np.allclose(loglik.rowsum_lower_tri(lower_tri_log),
                                    np.log(cumul_pois)))
                upper_tri = lik.get_mut_lik_upper_tri(e)
                upper_tri_log = loglik.get_mut_lik_upper_tri(e)
                self.assertTrue(
                    np.allclose(
                        lik.rowsum_upper_tri(upper_tri)[::-1], cumul_pois))
                with np.errstate(divide='ignore'):
                    self.assertTrue(
                        np.allclose(
                            loglik.rowsum_upper_tri(upper_tri_log)[::-1],
                            np.log(cumul_pois)))

    def test_logsumexp_streaming(self):
        lls = np.array([0.1, 0.2, 0.5])
        ll_sum = np.sum(lls)
        log_lls = np.log(lls)
        self.assertTrue(np.allclose(LogLikelihoodsStreaming.logsumexp(log_lls),
                                    np.log(ll_sum)))
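
# A minimal sketch (not tsdate's implementation) of the max-shifted logsumexp
# identity the two logsumexp tests above rely on:
# log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)) with m = max_i x_i,
# which avoids overflow/underflow when the x_i are extreme log likelihoods.
def _demo_logsumexp(x):
    m = np.max(x)
    return m + np.log(np.sum(np.exp(x - m)))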
class TestNodeGridValuesClass(unittest.TestCase):
    # TODO - needs a few more tests in here
    def test_init(self):
        num_nodes = 5
        ids = np.array([3, 4])
        timepoints = np.array(range(10))
        store = NodeGridValues(num_nodes, ids, timepoints, fill_value=6)
        self.assertEquals(store.grid_data.shape, (len(ids), len(timepoints)))
        self.assertEquals(len(store.fixed_data), (num_nodes - len(ids)))
        self.assertTrue(np.all(store.grid_data == 6))
        self.assertTrue(np.all(store.fixed_data == 6))
        ids = np.array([3, 4], dtype=np.int32)
        store = NodeGridValues(num_nodes, ids, timepoints, fill_value=5)
        self.assertEquals(store.grid_data.shape, (len(ids), len(timepoints)))
        self.assertEquals(len(store.fixed_data), num_nodes - len(ids))
        self.assertTrue(np.all(store.fixed_data == 5))

    def test_set_and_get(self):
        num_nodes = 5
        grid_size = 2
        fill = {}
        for ids in ([3, 4], []):
            np.random.seed(1)
            store = NodeGridValues(
                num_nodes, np.array(ids, dtype=np.int32), np.array(range(grid_size)))
            for i in range(num_nodes):
                fill[i] = np.random.random(grid_size if i in ids else None)
                store[i] = fill[i]
            for i in range(num_nodes):
                self.assertTrue(np.all(fill[i] == store[i]))
        self.assertRaises(IndexError, store.__getitem__, num_nodes)

    def test_bad_init(self):
        ids = [3, 4]
        self.assertRaises(ValueError, NodeGridValues, 3, np.array(ids),
                          np.array([0, 1.2, 2]))
        self.assertRaises(AttributeError, NodeGridValues, 5, np.array(ids), -1)
        self.assertRaises(ValueError, NodeGridValues, 5, np.array([-1]),
                          np.array([0, 1.2, 2]))

    def test_clone(self):
        num_nodes = 10
        grid_size = 2
        ids = [3, 4]
        orig = NodeGridValues(num_nodes, np.array(ids), np.array(range(grid_size)))
        orig[3] = np.array([1, 2])
        orig[4] = np.array([4, 3])
        orig[0] = 1.5
        orig[9] = 2.5
        # test with np.zeros
        clone = NodeGridValues.clone_with_new_data(orig, 0)
        self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
        self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
        self.assertTrue(np.all(clone.grid_data == 0))
        self.assertTrue(np.all(clone.fixed_data == 0))
        # test with something else
        clone = NodeGridValues.clone_with_new_data(orig, 5)
        self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
        self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
        self.assertTrue(np.all(clone.grid_data == 5))
        self.assertTrue(np.all(clone.fixed_data == 5))
        # test with different scalars
        scalars = np.arange(num_nodes - len(ids))
        clone = NodeGridValues.clone_with_new_data(orig, 0, scalars)
        self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
        self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
        self.assertTrue(np.all(clone.grid_data == 0))
        self.assertTrue(np.all(clone.fixed_data == scalars))

        clone = NodeGridValues.clone_with_new_data(
            orig, np.array([[1, 2], [4, 3]]))
        for i in range(num_nodes):
            if i in ids:
                self.assertTrue(np.all(clone[i] == orig[i]))
            else:
                self.assertTrue(np.isnan(clone[i]))
        clone = NodeGridValues.clone_with_new_data(
            orig, np.array([[1, 2], [4, 3]]), 0)
        for i in range(num_nodes):
            if i in ids:
                self.assertTrue(np.all(clone[i] == orig[i]))
            else:
                self.assertEquals(clone[i], 0)

    def test_bad_clone(self):
        num_nodes = 10
        ids = [3, 4]
        orig = NodeGridValues(num_nodes, np.array(ids), np.array([0, 1.2]))
        self.assertRaises(
            ValueError,
            NodeGridValues.clone_with_new_data,
            orig, np.array([[1, 2, 3], [4, 5, 6]]))
        self.assertRaises(
            ValueError,
            NodeGridValues.clone_with_new_data,
            orig, 0, np.array([[1, 2], [4, 5]]))


class TestAlgorithmClass(unittest.TestCase):
    def test_nonmatching_prior_vs_lik_timepoints(self):
        ts = utility_functions.single_tree_ts_n3()
        timepoints1 = np.array([0, 1.2, 2])
        timepoints2 = np.array([0, 1.1, 2])
        priors = tsdate.build_prior_grid(ts, timepoints1)
        lls = Likelihoods(ts, timepoints2)
        self.assertRaisesRegexp(ValueError, "timepoints", InOutAlgorithms,
                                priors, lls)

    def test_nonmatching_prior_vs_lik_fixednodes(self):
        ts1 = utility_functions.single_tree_ts_n3()
        ts2 = utility_functions.single_tree_ts_n2_dangling()
        timepoints = np.array([0, 1.2, 2])
        priors = tsdate.build_prior_grid(ts1, timepoints)
        lls = Likelihoods(ts2, priors.timepoints)
        self.assertRaisesRegexp(ValueError, "fixed", InOutAlgorithms, priors, lls)


class TestInsideAlgorithm(unittest.TestCase):
    def run_inside_algorithm(self, ts, prior_distr, normalize=True):
        priors = tsdate.build_prior_grid(ts, timepoints=np.array([0, 1.2, 2]),
                                         approximate_priors=False,
                                         prior_distribution=prior_distr)
        theta = 1
        eps = 1e-6
        lls = Likelihoods(ts, priors.timepoints, theta, eps=eps)
        lls.precalculate_mutation_likelihoods()
        algo = InOutAlgorithms(priors, lls)
        algo.inside_pass(normalize=normalize)
        return algo, priors

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        algo = self.run_inside_algorithm(ts, 'gamma')[0]
        self.assertTrue(np.allclose(algo.inside[2], np.array([0, 1, 0.10664654])))

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        algo = self.run_inside_algorithm(ts, 'gamma')[0]
        self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.0114771635])))
        self.assertTrue(np.allclose(algo.inside[4], np.array([0, 1, 0.1941815518])))

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        algo = self.run_inside_algorithm(ts, 'gamma')[0]
        self.assertTrue(np.allclose(algo.inside[4], np.array([0, 1, 0.00548801])))
        self.assertTrue(np.allclose(algo.inside[5], np.array([0, 1, 0.0239174])))
        self.assertTrue(np.allclose(algo.inside[6], np.array([0, 1, 0.26222197])))

    def test_polytomy_tree(self):
        ts = utility_functions.polytomy_tree_ts()
        algo = self.run_inside_algorithm(ts, 'gamma')[0]
        self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.12797265])))

    def test_two_tree_ts(self):
        ts = utility_functions.two_tree_ts()
        algo, priors = self.run_inside_algorithm(ts, 'gamma', normalize=False)
        # priors[3][1] * Ll_(0->3)(1.2 - 0 + eps) ** 2
        node3_t1 = priors[3][1] * scipy.stats.poisson.pmf(
            0, (1.2 + 1e-6) * 0.5 * 0.2) ** 2
        # priors[3][2] * sum(Ll_(0->3)(2 - t + eps))
        node3_t2 = priors[3][2] * scipy.stats.poisson.pmf(
            0, (2 + 1e-6) * 0.5 * 0.2) ** 2
        self.assertTrue(np.allclose(algo.inside[3],
                                    np.array([0, node3_t1, node3_t2])))
        """
        priors[4][1] * (Ll_(2->4)(1.2 - 0 + eps) * (Ll_(1->4)(1.2 - 0 + eps))
            * (Ll_(3->4)(1.2 - 1.2 + eps) * node3_t1)
        """
        node4_t1 = priors[4][1] * (scipy.stats.poisson.pmf(
            0, (1.2 + 1e-6) * 0.5 * 1) * scipy.stats.poisson.pmf(
            0, (1.2 + 1e-6) * 0.5 * 0.8) *
            ((scipy.stats.poisson.pmf(0, (1e-6) * 0.5 * 0.2) * node3_t1)))
        """
        priors[4][2] * (Ll_(2->4)(2 - 0 + eps) * Ll_(1->4)(2 - 0 + eps)
            * (sum_(t'<2)(Ll_(3->4)(2 - t' + eps) * node3_t))
        """
        node4_t2 = priors[4][2] * (scipy.stats.poisson.pmf(
            0, (2 + 1e-6) * 0.5 * 1) * scipy.stats.poisson.pmf(
            0, (2 + 1e-6) * 0.5 * 0.8) *
            ((scipy.stats.poisson.pmf(0, (0.8 + 1e-6) * 0.5 * 0.2) * node3_t1) +
             (scipy.stats.poisson.pmf(0, (1e-6 + 1e-6) * 0.5 * 0.2) * node3_t2)))
        self.assertTrue(np.allclose(algo.inside[4],
                                    np.array([0, node4_t1, node4_t2])))
        """
        priors[5][1] * (Ll_(4->5)(1.2 - 1.2 + eps) * (node4_t1 ** 0.8))
            * (Ll_(0->5)(1.2 - 0 + eps) * 1)
        Raising node4_t1 to 0.8 is geometric scaling.
        """
        node5_t1 = priors[5][1] * (scipy.stats.poisson.pmf(
            0, (1e-6) * 0.5 * 0.8) * (node4_t1 ** 0.8)) * (scipy.stats.poisson.pmf(
                0, (1.2 + 1e-6) * 0.5 * 0.8))
        """
        priors[5][2] * (sum_(t'<=2)(Ll_(4->5)(2 - t' + eps) * (node4_t' ** 0.8)))
            * (Ll_(0->5)(2 - 0 + eps) * 1)
        """
        node5_t2 = priors[5][2] * ((scipy.stats.poisson.pmf(
            0, (0.8 + 1e-6) * 0.5 * 0.8) * (node4_t1 ** 0.8)) +
            (scipy.stats.poisson.pmf(0, (1e-6 + 1e-6) * 0.5 * 0.8) *
             (node4_t2 ** 0.8))) * (scipy.stats.poisson.pmf(
                0, (2 + 1e-6) * 0.5 * 0.8))
        self.assertTrue(np.allclose(algo.inside[5],
                                    np.array([0, node5_t1, node5_t2])))

    def test_tree_with_unary_nodes(self):
        ts = utility_functions.single_tree_ts_with_unary()
        algo = self.run_inside_algorithm(ts, 'gamma')[0]
        self.assertTrue(np.allclose(algo.inside[7], np.array([0, 1, 0.25406637])))
        self.assertTrue(np.allclose(algo.inside[6], np.array([0, 1, 0.07506923])))
        self.assertTrue(np.allclose(algo.inside[5], np.array([0, 1, 0.13189998])))
        self.assertTrue(np.allclose(algo.inside[4], np.array([0, 1, 0.07370801])))
        self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.01147716])))

    def test_two_tree_mutation_ts(self):
        ts = utility_functions.two_tree_mutation_ts()
        algo = self.run_inside_algorithm(ts, 'gamma')[0]
        self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.02176622])))
        # self.assertTrue(np.allclose(upward[4], np.array([0, 2.90560754e-05, 1])))
        # NB the replacement below has not been hand-calculated
        self.assertTrue(np.allclose(algo.inside[4],
                                    np.array([0, 3.63200499e-11, 1])))
        # self.assertTrue(np.allclose(upward[5], np.array([0, 5.65044738e-05, 1])))
        # NB the replacement below has not been hand-calculated
        self.assertTrue(np.allclose(algo.inside[5],
                                    np.array([0, 7.06320034e-11, 1])))

    def test_dangling_fails(self):
        ts = utility_functions.single_tree_ts_n2_dangling()
        print(ts.draw_text())
        print("Samples:", ts.samples())
        priors = tsdate.build_prior_grid(ts, timepoints=np.array([0, 1.2, 2]))
        theta = 1
        eps = 1e-6
        lls = Likelihoods(ts, priors.timepoints, theta, eps)
        algo = InOutAlgorithms(priors, lls)
        self.assertRaisesRegexp(ValueError, "dangling", algo.inside_pass)
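
# A schematic sketch (not tsdate's implementation) of the inside-pass update
# verified piecewise in test_two_tree_ts above: for a parent at grid time t,
# its inside value is the prior times, for each child edge, the sum over
# child times t' <= t of the edge's Poisson mutation likelihood multiplied by
# the child's inside value (with fractional-span edges geometrically scaled).
def _demo_inside_update(prior_t, edge_liks, child_insides):
    # prior_t: prior density of the parent at time t
    # edge_liks: one array per child edge, likelihoods for each t' <= t
    # child_insides: matching arrays of inside values for each t' <= t
    val = prior_t
    for lik, inside in zip(edge_liks, child_insides):
        val *= np.sum(lik * inside)
    return val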
class TestOutsideAlgorithm(unittest.TestCase):
    def run_outside_algorithm(
            self, ts, prior_distr="lognorm", normalize=False,
            ignore_oldest_root=False):
        span_data = SpansBySamples(ts)
        priors = ConditionalCoalescentTimes(None, prior_distr)
        priors.add(ts.num_samples, approximate=False)
        grid = np.array([0, 1.2, 2])
        mixture_priors = priors.get_mixture_prior_params(span_data)
        prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
        theta = 1
        eps = 1e-6
        lls = Likelihoods(ts, grid, theta, eps=eps)
        lls.precalculate_mutation_likelihoods()
        algo = InOutAlgorithms(prior_vals, lls)
        algo.inside_pass()
        algo.outside_pass(normalize=normalize,
                          ignore_oldest_root=ignore_oldest_root)
        return algo

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        for prior_distr in ('lognorm', 'gamma'):
            algo = self.run_outside_algorithm(ts, prior_distr)
            # Root, should this be 0,1,1 or 1,1,1
            self.assertTrue(np.array_equal(
                algo.outside[2], np.array([1, 1, 1])))

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        for prior_distr in ('lognorm', 'gamma'):
            algo = self.run_outside_algorithm(ts, prior_distr)
            # self.assertTrue(np.allclose(
            #     downward[3], np.array([0, 1, 0.33508884])))
            self.assertTrue(np.allclose(algo.outside[4], np.array([1, 1, 1])))
            # self.assertTrue(np.allclose(
            #     posterior[3], np.array([0, 0.99616886, 0.00383114])))
            # self.assertTrue(np.allclose(
            #     posterior[4], np.array([0, 0.83739361, 0.16260639])))

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        for prior_distr in ('lognorm', 'gamma'):
            algo = self.run_outside_algorithm(ts, prior_distr)
            # self.assertTrue(np.allclose(
            #     downward[4], np.array([0, 1, 0.02187283])))
            # self.assertTrue(np.allclose(
            #     downward[5], np.array([0, 1, 0.41703272])))
            # Root, should this be 0,1,1 or 1,1,1
            self.assertTrue(np.allclose(
                algo.outside[6], np.array([1, 1, 1])))

    def test_outside_before_inside_fails(self):
        ts = utility_functions.single_tree_ts_n2()
        priors = tsdate.build_prior_grid(ts)
        theta = 1
        lls = Likelihoods(ts, priors.timepoints, theta)
        lls.precalculate_mutation_likelihoods()
        algo = InOutAlgorithms(priors, lls)
        self.assertRaises(RuntimeError, algo.outside_pass)

    def test_normalize_outside(self):
        ts = msprime.simulate(50, Ne=10000, mutation_rate=1e-8,
                              recombination_rate=1e-8)
        normalize = self.run_outside_algorithm(ts, normalize=True)
        no_normalize = self.run_outside_algorithm(ts, normalize=False)
        self.assertTrue(
            np.allclose(
                normalize.outside.grid_data[:],
                (no_normalize.outside.grid_data[:] /
                 np.max(no_normalize.outside.grid_data[:],
                        axis=1)[:, np.newaxis])))

    def test_ignore_oldest_root(self):
        ts = utility_functions.single_tree_ts_mutation_n3()
        ignore_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=True)
        use_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=False)
        self.assertTrue(~np.array_equal(
            ignore_oldest.outside[3], use_oldest.outside[3]))
        # When a node is not used in the outside algorithm, all its outside
        # values should be equal
        self.assertTrue(np.all(
            ignore_oldest.outside[3] == ignore_oldest.outside[3][0]))
        self.assertTrue(np.all(use_oldest.outside[4] == use_oldest.outside[4][0]))

    def test_ignore_oldest_root_two_mrcas(self):
        ts = utility_functions.two_tree_two_mrcas()
        ignore_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=True)
        use_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=False)
        self.assertTrue(~np.array_equal(
            ignore_oldest.outside[7], use_oldest.outside[7]))
        self.assertTrue(~np.array_equal(
            ignore_oldest.outside[6], use_oldest.outside[6]))
        # In this example, if the outside algorithm was *not* used, nodes 4 and 5
        # should have the same outside values. If it is used, node 5 should seem
        # younger than 4
        self.assertTrue(np.array_equal(
            ignore_oldest.outside[4], ignore_oldest.outside[5]))
        self.assertTrue(~np.array_equal(
            use_oldest.outside[4], use_oldest.outside[5]))


class TestTotalFunctionalValueTree(unittest.TestCase):
    """
    Tests to ensure that we recover the total functional value of the tree.
    We can also recover this property in the tree sequence in the special case
    where all node times are known (or all bar one).
    """

    def find_posterior(self, ts, prior_distr):
        grid = np.array([0, 1.2, 2])
        span_data = SpansBySamples(ts)
        priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
        priors.add(ts.num_samples, approximate=False)
        mixture_priors = priors.get_mixture_prior_params(span_data)
        prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
        theta = 1
        eps = 1e-6
        lls = Likelihoods(ts, grid, theta, eps=eps)
        lls.precalculate_mutation_likelihoods()
        algo = InOutAlgorithms(prior_vals, lls)
        algo.inside_pass()
        posterior = algo.outside_pass(normalize=False)
        self.assertTrue(np.array_equal(
            np.sum(algo.inside.grid_data * algo.outside.grid_data, axis=1),
            np.sum(algo.inside.grid_data * algo.outside.grid_data, axis=1)))
        self.assertTrue(np.allclose(
            np.sum(algo.inside.grid_data * algo.outside.grid_data, axis=1),
            np.sum(algo.inside.grid_data[-1])))
        return posterior, algo

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        for distr in ('gamma', 'lognorm'):
            posterior, algo = self.find_posterior(ts, distr)

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        for distr in ('gamma', 'lognorm'):
            posterior, algo = self.find_posterior(ts, distr)

    def test_one_tree_n4(self):
        ts = utility_functions.single_tree_ts_n4()
        for distr in ('gamma', 'lognorm'):
            posterior, algo = self.find_posterior(ts, distr)

    def test_one_tree_n3_mutation(self):
        ts = utility_functions.single_tree_ts_mutation_n3()
        for distr in ('gamma', 'lognorm'):
            posterior, algo = self.find_posterior(ts, distr)

    def test_polytomy_tree(self):
        ts = utility_functions.polytomy_tree_ts()
        for distr in ('gamma', 'lognorm'):
            posterior, algo = self.find_posterior(ts, distr)

    def test_tree_with_unary_nodes(self):
        ts = utility_functions.single_tree_ts_with_unary()
        for distr in ('gamma', 'lognorm'):
            posterior, algo = self.find_posterior(ts, distr)


class TestGilTree(unittest.TestCase):
    """
    Test results against hardcoded values Gil independently worked out
    """

    def test_gil_tree(self):
        for cache_inside in [False, True]:
            ts = utility_functions.gils_example_tree()
            span_data = SpansBySamples(ts)
            prior_distr = 'lognorm'
            priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
            priors.add(ts.num_samples, approximate=False)
            grid = np.array([0, 0.1, 0.2, 0.5, 1, 2, 5])
            mixture_prior = priors.get_mixture_prior_params(span_data)
            prior_vals = fill_priors(mixture_prior, grid, ts,
                                     prior_distr=prior_distr)
            prior_vals.grid_data[0] = [0, 0.5, 0.3, 0.1, 0.05, 0.02, 0.03]
            prior_vals.grid_data[1] = [0, 0.05, 0.1, 0.2, 0.45, 0.1, 0.1]
            theta = 2
            eps = 0.01
            lls = Likelihoods(ts, grid, theta, eps=eps, normalize=False)
            lls.precalculate_mutation_likelihoods()
            algo = InOutAlgorithms(prior_vals, lls)
            algo.inside_pass(normalize=False, cache_inside=cache_inside)
            algo.outside_pass(normalize=False)
            self.assertTrue(
                np.allclose(np.sum(algo.inside.grid_data *
                                   algo.outside.grid_data, axis=1),
                            [7.44449E-05, 7.44449E-05]))
            self.assertTrue(
                np.allclose(np.sum(algo.inside.grid_data *
                                   algo.outside.grid_data, axis=1),
                            np.sum(algo.inside.grid_data[-1])))


class TestOutsideEdgesOrdering(unittest.TestCase):
    """
    Test that edges_by_child_desc() and edges_by_child_then_parent_desc()
    order edges correctly.
    """

    def edges_ordering(self, ts, fn):
        fixed_nodes = set(ts.samples())
        priors = tsdate.build_prior_grid(ts)
        theta = None
        liklhd = LogLikelihoods(ts, priors.timepoints, theta,
                                eps=1e-6, fixed_node_set=fixed_nodes,
                                progress=False)
        dynamic_prog = InOutAlgorithms(priors, liklhd, progress=False)
        if fn == "outside_pass":
            edges_by_child = dynamic_prog.edges_by_child_desc()
            seen_children = list()
            last_child_time = None
            for child, edges in edges_by_child:
                for edge in edges:
                    self.assertTrue(edge.child not in seen_children)
                cur_child_time = ts.tables.nodes.time[child]
                if last_child_time:
                    self.assertTrue(cur_child_time <= last_child_time)
                seen_children.append(child)
                last_child_time = ts.tables.nodes.time[child]
        elif fn == "outside_maximization":
            edges_by_child = dynamic_prog.edges_by_child_then_parent_desc()
            seen_children = list()
            last_child_time = None
            for child, edges in edges_by_child:
                last_parent_time = None
                for edge in edges:
                    cur_parent_time = ts.tables.nodes.time[edge.parent]
                    if last_parent_time:
                        self.assertTrue(cur_parent_time >= last_parent_time)
                    last_parent_time = cur_parent_time
                self.assertTrue(child not in seen_children)
                cur_child_time = ts.tables.nodes.time[child]
                if last_child_time:
                    self.assertTrue(cur_child_time <= last_child_time)
                seen_children.append(child)
                last_child_time = ts.tables.nodes.time[child]

    def test_two_tree_outside_traversal(self):
        """
        For the outside algorithm we simply want to traverse the ts from
        oldest child nodes to youngest, grouping all edges with the same child
        node id together. For the outside maximization algorithm we
        additionally order the edges of each child by the age of the parent
        node.
        """
        ts = utility_functions.two_tree_two_mrcas()
        self.edges_ordering(ts, "outside_pass")
        self.edges_ordering(ts, "outside_maximization")

    def test_simulated_inferred_outside_traversal(self):
        ts = msprime.simulate(500, Ne=10000, length=5e4, mutation_rate=1e-8,
                              recombination_rate=1e-8, random_seed=12)
        sample_data = tsinfer.SampleData.from_tree_sequence(
            ts, use_sites_time=False)
        inferred_ts = tsinfer.infer(sample_data)
        self.edges_ordering(inferred_ts, "outside_pass")
        self.edges_ordering(inferred_ts, "outside_maximization")


class TestMaximization(unittest.TestCase):
    """
    Test the outside maximization function
    """

    def run_outside_maximization(self, ts, prior_distr="lognorm"):
        priors = tsdate.build_prior_grid(ts, prior_distribution=prior_distr)
        Ne = 0.5
        theta = 1
        eps = 1e-6
        lls = Likelihoods(ts, priors.timepoints, theta, eps=eps)
        lls.precalculate_mutation_likelihoods()
        algo = InOutAlgorithms(priors, lls)
        algo.inside_pass()
        return lls, algo, algo.outside_maximization(Ne, eps=eps)

    def test_one_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        for prior_distr in ('lognorm', 'gamma'):
            lls, algo, maximized_ages = self.run_outside_maximization(
                ts, prior_distr)
            self.assertTrue(np.array_equal(
                maximized_ages,
                np.array([0, 0, lls.timepoints[np.argmax(algo.inside[2])]])))

    def test_one_tree_n3(self):
        ts = utility_functions.single_tree_ts_n3()
        for prior_distr in ('lognorm', 'gamma'):
            lls, algo, maximized_ages = self.run_outside_maximization(
                ts, prior_distr)
            node_4 = lls.timepoints[np.argmax(algo.inside[4])]
            ll_mut = scipy.stats.poisson.pmf(
                0, (node_4 - lls.timepoints[:np.argmax(algo.inside[4]) + 1] +
                    1e-6) * 1 / 2 * 1)
            result = ll_mut / np.max(ll_mut)
            inside_val = algo.inside[3][:(np.argmax(algo.inside[4]) + 1)]
            node_3 = lls.timepoints[np.argmax(
                result[:np.argmax(algo.inside[4]) + 1] * inside_val)]
            self.assertTrue(np.array_equal(
                maximized_ages, np.array([0, 0, 0, node_3, node_4])))

    def test_two_tree_ts(self):
        ts = utility_functions.two_tree_ts()
        for prior_distr in ('lognorm', 'gamma'):
            lls, algo, maximized_ages = self.run_outside_maximization(
                ts, prior_distr)
            node_5 = lls.timepoints[np.argmax(algo.inside[5])]
            ll_mut = scipy.stats.poisson.pmf(
                0, (node_5 - lls.timepoints[:np.argmax(algo.inside[5]) + 1] +
                    1e-6) * 1 / 2 * 0.8)
            result = ll_mut / np.max(ll_mut)
            inside_val = algo.inside[4][:(np.argmax(algo.inside[5]) + 1)]
            node_4 = lls.timepoints[np.argmax(
                result[:np.argmax(algo.inside[5]) + 1] * inside_val)]
            ll_mut = scipy.stats.poisson.pmf(
                0, (node_4 - lls.timepoints[:np.argmax(algo.inside[4]) + 1] +
                    1e-6) * 1 / 2 * 0.2)
            result = ll_mut / np.max(ll_mut)
            inside_val = algo.inside[3][:(np.argmax(algo.inside[4]) + 1)]
            node_3 = lls.timepoints[np.argmax(
                result[:np.argmax(algo.inside[4]) + 1] * inside_val)]
            self.assertTrue(np.array_equal(
                maximized_ages, np.array([0, 0, 0, node_3, node_4, node_5])))


class TestDate(unittest.TestCase):
    """
    Test inputs to tsdate.date()
    """

    def test_date_input(self):
        ts = utility_functions.single_tree_ts_n2()
        self.assertRaises(ValueError, tsdate.date, ts, 1, method="foobar")

    def test_sample_as_parent_fails(self):
        ts = utility_functions.single_tree_ts_n3_sample_as_parent()
        self.assertRaises(NotImplementedError, tsdate.date, ts, 1)

    def test_recombination_not_implemented(self):
        ts = utility_functions.single_tree_ts_n2()
        self.assertRaises(NotImplementedError, tsdate.date, ts, 1,
                          recombination_rate=1e-8)


class TestBuildPriorGrid(unittest.TestCase):
    """
    Test tsdate.build_prior_grid() works as expected
    """

    def test_bad_timepoints(self):
        ts = msprime.simulate(2, random_seed=123)
        for bad in [-1, np.array([1]), np.array([-1, 2, 3]),
                    np.array([1, 1, 1]), "foobar"]:
            self.assertRaises(ValueError, tsdate.build_prior_grid, ts,
                              timepoints=bad)
        for bad in [np.array(["hello", "there"])]:
            self.assertRaises(TypeError, tsdate.build_prior_grid, ts,
                              timepoints=bad)

    def test_bad_prior_distr(self):
        ts = msprime.simulate(2, random_seed=12)
        self.assertRaises(ValueError, tsdate.build_prior_grid, ts,
                          prior_distribution="foobar")


class TestPosteriorMeanVar(unittest.TestCase):
    """
    Test posterior_mean_var works as expected
    """

    def test_posterior_mean_var(self):
        ts = utility_functions.single_tree_ts_n2()
        grid = np.array([0, 1.2, 2])
        for distr in ('gamma', 'lognorm'):
            posterior, algo = TestTotalFunctionalValueTree().find_posterior(
                ts, distr)
            ts_node_metadata, mn_post, vr_post = posterior_mean_var(
                ts, grid, posterior, 0.5)
            self.assertTrue(np.array_equal(
                mn_post,
                [0, 0, np.sum(grid * posterior[2]) / np.sum(posterior[2])]))

    def test_node_metadata_single_tree_n2(self):
        ts = utility_functions.single_tree_ts_n2()
        grid = np.array([0, 1.2, 2])
        posterior, algo = TestTotalFunctionalValueTree().find_posterior(
            ts, "lognorm")
        ts_node_metadata, mn_post, vr_post = posterior_mean_var(
            ts, grid, posterior, 0.5)
        self.assertTrue(json.loads(
            ts_node_metadata.node(2).metadata)["mn"] == mn_post[2])
        self.assertTrue(json.loads(
            ts_node_metadata.node(2).metadata)["vr"] == vr_post[2])

    def test_node_metadata_simulated_tree(self):
        larger_ts = msprime.simulate(
            10, mutation_rate=1, recombination_rate=1, length=20)
        _, mn_post, _, _, eps, _ = get_dates(larger_ts, 10000)
        dated_ts = date(larger_ts, 10000)
        metadata = dated_ts.tables.nodes.metadata
        metadata_offset = dated_ts.tables.nodes.metadata_offset
        unconstrained_mn = [
            json.loads(met.decode())["mn"] for met in tskit.unpack_bytes(
                metadata, metadata_offset) if len(met.decode()) > 0]
        self.assertTrue(np.array_equal(unconstrained_mn,
                                       mn_post[larger_ts.num_samples:]))
        self.assertTrue(np.all(
            dated_ts.tables.nodes.time[larger_ts.num_samples:] >=
            mn_post[larger_ts.num_samples:]))


class TestConstrainAgesTopo(unittest.TestCase):
    """
    Test constrain_ages_topo works as expected
    """

    def test_constrain_ages_topo(self):
        """
        Set node 3 to be older than node 4 in two_tree_ts
        """
        ts = utility_functions.two_tree_ts()
        post_mn = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 3.0])
        eps = 1e-6
        nodes_to_date = np.array([3, 4, 5])
        constrained_ages = constrain_ages_topo(ts, post_mn, eps, nodes_to_date)
        self.assertTrue(
            np.array_equal(
                np.array([0.0, 0.0, 0.0, 2.0, 2.000001, 3.0]), constrained_ages
            )
        )

    def test_constrain_ages_topo_no_nodes_to_date(self):
        ts = utility_functions.two_tree_ts()
        post_mn = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 3.0])
        eps = 1e-6
        nodes_to_date = None
        constrained_ages = constrain_ages_topo(ts, post_mn, eps, nodes_to_date)
        self.assertTrue(
            np.array_equal(
                np.array([0.0, 0.0, 0.0, 2.0, 2.000001, 3.0]), constrained_ages
            )
        )

    def test_constrain_ages_topo_unary_nodes_unordered(self):
        ts = utility_functions.single_tree_ts_with_unary()
        post_mn = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 0.5, 5.0, 1.0])
        eps = 1e-6
        constrained_ages = constrain_ages_topo(ts, post_mn, eps)
        self.assertTrue(
            np.allclose(
                np.array([0.0, 0.0, 0.0, 2.0, 2.000001, 2.000002, 5.0, 5.000001]),
                constrained_ages,
            )
        )

    def test_constrain_ages_topo_part_dangling(self):
        ts = utility_functions.two_tree_ts_n2_part_dangling()
        post_mn = np.array([1.0, 0.0, 0.0, 0.1, 0.05])
        eps = 1e-6
        constrained_ages = constrain_ages_topo(ts, post_mn, eps)
        self.assertTrue(
            np.allclose(np.array([1.0, 0.0, 0.0, 1.000001, 1.000002]),
                        constrained_ages)
        )

    def test_constrain_ages_topo_sample_as_parent(self):
        ts = utility_functions.single_tree_ts_n3_sample_as_parent()
        post_mn = np.array([0.0, 0.0, 0.0, 3.0, 1.0])
        eps = 1e-6
        constrained_ages = constrain_ages_topo(ts, post_mn, eps)
        self.assertTrue(
            np.allclose(np.array([0.0, 0.0, 0.0, 3.0, 3.000001]),
                        constrained_ages)
        )

    def test_two_tree_ts_n3_non_contemporaneous(self):
        ts = utility_functions.two_tree_ts_n3_non_contemporaneous()
        post_mn = np.array([0.0, 0.0, 3.0, 4.0, 0.1, 4.1])
        eps = 1e-6
        constrained_ages = constrain_ages_topo(ts, post_mn, eps)
        self.assertTrue(
            np.allclose(np.array([0.0, 0.0, 3.0, 4.0, 4.000001, 4.1]),
                        constrained_ages)
        )
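
# A minimal sketch (not tsdate's implementation) of the topological age
# constraint the tests above exercise: a parent's age is pushed up to at
# least eps above its oldest child, so in a pass over edges ordered from
# youngest to oldest child, every parent ends up strictly older than all of
# its children. E.g. with post_mn = [0, 0, 0, 2.0, 1.0, 3.0] and node 3 a
# child of node 4, node 4 is raised from 1.0 to 2.000001, as asserted above.
def _demo_constrain_parent_age(parent_age, child_ages, eps=1e-6):
    return max(parent_age, max(child_ages) + eps)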
test_constrain_ages_topo_sample_as_parent(self): ts = utility_functions.single_tree_ts_n3_sample_as_parent() post_mn = np.array([0.0, 0.0, 0.0, 3.0, 1.0]) eps = 1e-6 constrained_ages = constrain_ages_topo(ts, post_mn, eps) self.assertTrue( np.allclose(np.array([0.0, 0.0, 0.0, 3.0, 3.000001]), constrained_ages) ) def test_two_tree_ts_n3_non_contemporaneous(self): ts = utility_functions.two_tree_ts_n3_non_contemporaneous() post_mn = np.array([0.0, 0.0, 3.0, 4.0, 0.1, 4.1]) eps = 1e-6 constrained_ages = constrain_ages_topo(ts, post_mn, eps) self.assertTrue( np.allclose(np.array([0.0, 0.0, 3.0, 4.0, 4.000001, 4.1]), constrained_ages) ) class TestPreprocessTs(unittest.TestCase): """ Test preprocess_ts works as expected """ def verify(self, ts, minimum_gap=None, remove_telomeres=None, **kwargs): with self.assertLogs("tsdate.util", level="INFO") as logs: if minimum_gap is not None and remove_telomeres is not None: ts = tsdate.preprocess_ts(ts, minimum_gap=minimum_gap, remove_telomeres=remove_telomeres) elif minimum_gap is not None and remove_telomeres is None: ts = tsdate.preprocess_ts(ts, minimum_gap=minimum_gap) elif remove_telomeres is not None and minimum_gap is None: ts = tsdate.preprocess_ts(ts, remove_telomeres=remove_telomeres) else: ts = tsdate.preprocess_ts(ts, **kwargs) messages = [record.msg for record in logs.records] self.assertIn("Beginning preprocessing", messages) return ts def test_no_sites(self): ts = utility_functions.two_tree_ts() self.assertRaises(ValueError, tsdate.preprocess_ts, ts) def test_invariant_sites(self): # Test that passing kwargs to simplify works as expected ts = utility_functions.site_no_mutations() with warnings.catch_warnings(record=True) as w: removed = self.verify(ts) self.assertTrue(removed.num_sites == 0) self.assertTrue(len(w) == 1) self.assertTrue( tsdate.preprocess_ts( ts, **{"filter_sites": False}).num_sites == ts.num_sites) def test_no_intervals(self): ts = utility_functions.two_tree_mutation_ts() self.assertTrue( ts.tables.edges == self.verify(ts, remove_telomeres=False).tables.edges) self.assertTrue( ts.tables.edges == self.verify(ts, minimum_gap=0.05).tables.edges) def test_delete_interval(self): ts = utility_functions.ts_w_data_desert(40, 60, 100) trimmed = self.verify(ts, minimum_gap=20, remove_telomeres=False) lefts = trimmed.tables.edges.left rights = trimmed.tables.edges.right self.assertTrue( not np.any(np.logical_and(lefts > 41, lefts < 59))) self.assertTrue( not np.any(np.logical_and(rights > 41, rights < 59))) def test_remove_telomeres(self): ts = utility_functions.ts_w_data_desert(0, 5, 100) removed = self.verify(ts, minimum_gap=ts.get_sequence_length()) lefts = removed.tables.edges.left rights = removed.tables.edges.right self.assertTrue( not np.any(np.logical_and(lefts > 0, lefts < 4))) self.assertTrue( not np.any(np.logical_and(rights > 0, rights < 4))) ts = utility_functions.ts_w_data_desert(95, 100, 100) removed = self.verify(ts, minimum_gap=ts.get_sequence_length()) lefts = removed.tables.edges.left rights = removed.tables.edges.right self.assertTrue( not np.any(np.logical_and(lefts > 96, lefts < 100))) self.assertTrue( not np.any(np.logical_and(rights > 96, rights < 100))) class TestNodeTimes(unittest.TestCase): """ Test node_times works as expected. 
""" def test_node_times(self): larger_ts = msprime.simulate( 10, mutation_rate=1, recombination_rate=1, length=20) dated = date(larger_ts, 10000) node_ages = nodes_time(dated) self.assertTrue(np.all(dated.tables.nodes.time[:] >= node_ages)) def test_fails_unconstrained(self): ts = utility_functions.two_tree_mutation_ts() self.assertRaises(ValueError, nodes_time, ts, unconstrained=True) class TestSiteTimes(unittest.TestCase): """ Test sites_time works as expected """ def test_no_sites(self): ts = utility_functions.two_tree_ts() self.assertRaises(ValueError, tsdate.sites_time_from_ts, ts) def test_mutation_age_param(self): ts = utility_functions.two_tree_mutation_ts() self.assertRaises( ValueError, tsdate.sites_time_from_ts, ts, mutation_age="sibling") def test_sites_time_insideoutside(self): ts = utility_functions.two_tree_mutation_ts() dated = tsdate.date(ts, 1) _, mn_post, _, _, eps, _ = get_dates(ts, 1) self.assertTrue(np.array_equal( mn_post[ts.tables.mutations.node], tsdate.sites_time_from_ts(dated, unconstrained=True))) self.assertTrue(np.array_equal( dated.tables.nodes.time[ts.tables.mutations.node], tsdate.sites_time_from_ts(dated, unconstrained=False))) def test_sites_time_maximization(self): ts = utility_functions.two_tree_mutation_ts() dated = tsdate.date(ts, Ne=1, mutation_rate=1, method="maximization") self.assertTrue(np.array_equal( dated.tables.nodes.time[ts.tables.mutations.node], tsdate.sites_time_from_ts(dated, unconstrained=False))) def test_sites_time_mutation_age(self): ts = utility_functions.two_tree_mutation_ts() dated = tsdate.date(ts, Ne=1, mutation_rate=1) sites_time_child = tsdate.sites_time_from_ts(dated, mutation_age="child") dated_nodes_time = nodes_time(dated) self.assertTrue(np.array_equal( dated_nodes_time[ts.tables.mutations.node], sites_time_child)) sites_time_parent = tsdate.sites_time_from_ts(dated, mutation_age="parent") parent_sites_check = np.zeros(dated.num_sites) for tree in dated.trees(): for site in tree.sites(): for mut in site.mutations: parent_sites_check[site.id] = dated_nodes_time[tree.parent(mut.node)] self.assertTrue(np.array_equal(parent_sites_check, sites_time_parent)) sites_time_arithmetic = tsdate.sites_time_from_ts( dated, mutation_age="arithmetic") arithmetic_sites_check = np.zeros(dated.num_sites) for tree in dated.trees(): for site in tree.sites(): for mut in site.mutations: arithmetic_sites_check[site.id] = ( dated_nodes_time[mut.node] + dated_nodes_time[tree.parent(mut.node)]) / 2 self.assertTrue(np.array_equal( arithmetic_sites_check, sites_time_arithmetic)) sites_time_geometric = tsdate.sites_time_from_ts(dated, mutation_age="geometric") geometric_sites_check = np.zeros(dated.num_sites) for tree in dated.trees(): for site in tree.sites(): for mut in site.mutations: geometric_sites_check[site.id] = np.sqrt( dated_nodes_time[mut.node] * dated_nodes_time[tree.parent(mut.node)]) self.assertTrue(np.array_equal( geometric_sites_check, sites_time_geometric)) def test_sites_time_multiallelic(self): ts = utility_functions.single_tree_ts_2mutations_multiallelic_n3() sites_time = tsdate.sites_time_from_ts(ts, unconstrained=False) self.assertTrue(np.array_equal( [np.max(ts.tables.nodes.time[ts.tables.mutations.node])], sites_time)) sites_time = tsdate.sites_time_from_ts( ts, unconstrained=False, ignore_multiallelic=False) self.assertTrue(math.isnan(sites_time[0])) def test_sites_time_singletons(self): ts = utility_functions.single_tree_ts_2mutations_singletons_n3() sites_time = tsdate.sites_time_from_ts(ts, unconstrained=False) 
self.assertTrue(np.array_equal(sites_time, [1e-6])) def test_sites_time_multiple_mutations(self): ts = utility_functions.single_tree_ts_2mutations_n3() sites_time = tsdate.sites_time_from_ts(ts, unconstrained=False) self.assertTrue(np.array_equal(sites_time, [1])) def test_sites_time_simulated(self): larger_ts = msprime.simulate( 10, mutation_rate=1, recombination_rate=1, length=20) _, mn_post, _, _, eps, _ = get_dates(larger_ts, 10000) dated = date(larger_ts, 10000) self.assertTrue( np.array_equal(mn_post[larger_ts.tables.mutations.node], tsdate.sites_time_from_ts(dated, unconstrained=True))) self.assertTrue(np.array_equal( dated.tables.nodes.time[larger_ts.tables.mutations.node], tsdate.sites_time_from_ts(dated, unconstrained=False))) class TestSampleDataTimes(unittest.TestCase): """ Test add_sampledata_times """ def test_wrong_number_of_sites(self): ts = utility_functions.single_tree_ts_2mutations_n3() sites_time = tsdate.sites_time_from_ts(ts, unconstrained=False) sites_time = np.append(sites_time, [10]) samples = tsinfer.formats.SampleData.from_tree_sequence( ts, use_sites_time=False) self.assertRaises(ValueError, tsdate.add_sampledata_times, samples, sites_time) def test_historic_samples(self): samples = [msprime.Sample(population=0, time=0) for i in range(10)] ancients = [msprime.Sample(population=0, time=1000) for i in range(10)] samps = samples + ancients ts = msprime.simulate(samples=samps, mutation_rate=1e-8, recombination_rate=1e-8, Ne=10000, length=1e4) samples = tsinfer.formats.SampleData.from_tree_sequence( ts, use_individuals_time=True) ancient_samples = np.where( ts.tables.nodes.time[:][ts.samples()] != 0)[0].astype('int32') ancient_samples_times = ts.tables.nodes.time[ancient_samples] inferred = tsinfer.infer(samples) dated = date(inferred, 10000, 1e-8) sites_time = tsdate.sites_time_from_ts(dated) dated_samples = tsdate.add_sampledata_times(samples, sites_time) for variant in ts.variants(samples=ancient_samples): if np.any(variant.genotypes == 1): ancient_bound = np.max(ancient_samples_times[variant.genotypes == 1]) self.assertTrue( dated_samples.sites_time[variant.site.id] >= ancient_bound) def test_sampledata(self): samples = [msprime.Sample(population=0, time=0) for i in range(10)] ancients = [msprime.Sample(population=0, time=1000) for i in range(10)] samps = samples + ancients ts = msprime.simulate(samples=samps, mutation_rate=1e-8, recombination_rate=1e-8, Ne=10000, length=1e4) samples = tsinfer.formats.SampleData.from_tree_sequence( ts, use_sites_time=False) inferred = tsinfer.infer(samples) dated = date(inferred, 10000, 1e-8) sites_time = tsdate.sites_time_from_ts(dated) sites_bound = samples.min_site_times(individuals_only=True) check_sites_time =
np.maximum(sites_time, sites_bound)
numpy.maximum
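As a quick illustration of the np.maximum pattern the snippet above ends on, here is a minimal sketch that clamps estimated site ages from below by per-site bounds; the arrays are made-up toy values, not tsdate output.

import numpy as np

# Hypothetical per-site age estimates and per-site lower bounds, e.g. the age
# of the oldest ancient sample observed to carry each variant (toy values).
sites_time = np.array([120.0, 85.0, 40.0, 300.0])
sites_bound = np.array([100.0, 100.0, 0.0, 0.0])

# Elementwise maximum: every estimate is raised to at least its bound.
check_sites_time = np.maximum(sites_time, sites_bound)
print(check_sites_time)  # [120. 100.  40. 300.]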
# coding: utf-8 from __future__ import print_function from __future__ import division import torch import torch.nn as nn import torch.nn.functional as F from libcity.model.abstract_model import AbstractModel from math import sin, cos, sqrt, atan2, radians import numpy as np def identity_loss(y_true, y_pred): return torch.mean(y_pred - 0 * y_true) class CARA1(nn.Module): def hard_sigmoid(self, x): x = torch.tensor(x / 6 + 0.5) x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x def __init__(self, output_dim, input_dim, init='glorot_uniform', inner_init='orthogonal', **kwargs): super(CARA1, self).__init__() self.output_dim = output_dim self.init = init self.inner_init = inner_init self.activation = self.hard_sigmoid self.inner_activation = nn.Tanh() self.build(input_dim) def add_weight(self, shape, initializer): ts = torch.zeros(shape) if initializer == 'glorot_uniform': ts = nn.init.xavier_normal_(ts) elif initializer == 'orthogonal': ts = nn.init.orthogonal_(ts) return nn.Parameter(ts) def build(self, input_shape): # self.input_spec = [InputSpec(shape=input_shape)] self.input_dim = input_shape self.W_z = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_z = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_z = self.add_weight((self.output_dim,), initializer='zero') self.W_r = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_r = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_r = self.add_weight((self.output_dim,), initializer='zero') self.W_h = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_h = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_h = self.add_weight((self.output_dim,), initializer='zero') self.A_h = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.A_u = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_a_h = self.add_weight((self.output_dim,), initializer='zero') self.b_a_u = self.add_weight((self.output_dim,), initializer='zero') self.W_t = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_t = self.add_weight((1, self.output_dim), initializer=self.init) self.b_t = self.add_weight((self.output_dim,), initializer='zero') self.W_g = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_g = self.add_weight((1, self.output_dim), initializer=self.init) self.b_g = self.add_weight((self.output_dim,), initializer='zero') def preprocess_input(self, x): return x def forward(self, x): """ X : batch * timeLen * dims(有拓展) """ tlen = x.shape[1] output = torch.zeros((x.shape[0], self.output_dim)) for i in range(tlen): output = self.step(x[:, i, :], output) return output def step(self, x, states): """ 用于多批次同一时间 states为上一次多批次统一时间数据 """ h_tm1 = states # phi_t u = x[:, self.output_dim: 2 * self.output_dim] # delta_t t = x[:, 2 * self.output_dim: (2 * self.output_dim) + 1] # delta_g g = x[:, (2 * self.output_dim) + 1:] # phi_v x = x[:, :self.output_dim] t = self.inner_activation(torch.matmul(t, self.U_t)) g = self.inner_activation(torch.matmul(g, self.U_g)) # Time-based gate t1 = self.inner_activation(torch.matmul(x, self.W_t) + t + self.b_t) # Geo-based gate g1 = self.inner_activation(torch.matmul(x, self.W_g) + g + self.b_g) # Contextual Attention Gate a = self.inner_activation( torch.matmul(h_tm1, self.A_h) + torch.matmul(u, self.A_u) + self.b_a_h + self.b_a_u) x_z = 
torch.matmul(x, self.W_z) + self.b_z x_r = torch.matmul(x, self.W_r) + self.b_r x_h = torch.matmul(x, self.W_h) + self.b_h u_z_ = torch.matmul((1 - a) * u, self.W_z) + self.b_z u_r_ = torch.matmul((1 - a) * u, self.W_r) + self.b_r u_h_ = torch.matmul((1 - a) * u, self.W_h) + self.b_h u_z = torch.matmul(a * u, self.W_z) + self.b_z u_r = torch.matmul(a * u, self.W_r) + self.b_r u_h = torch.matmul(a * u, self.W_h) + self.b_h # update gate z = self.inner_activation(x_z + torch.matmul(h_tm1, self.U_z) + u_z) # reset gate r = self.inner_activation(x_r + torch.matmul(h_tm1, self.U_r) + u_r) # hidden state hh = self.activation(x_h + torch.matmul(r * t1 * g1 * h_tm1, self.U_h) + u_h) h = z * h_tm1 + (1 - z) * hh h = (1 + u_z_ + u_r_ + u_h_) * h return h # return h def bpr_triplet_loss(x): positive_item_latent, negative_item_latent = x reg = 0 loss = 1 - torch.log(torch.sigmoid( torch.sum(positive_item_latent, dim=-1, keepdim=True) - torch.sum(negative_item_latent, dim=-1, keepdim=True))) - reg return loss class Recommender(nn.Module): def __init__(self, num_users, num_items, num_times, latent_dim, maxvenue=5): super(Recommender, self).__init__() self.maxVenue = maxvenue self.latent_dim = latent_dim # num * maxVenue * dim self.U_Embedding = nn.Embedding(num_users, latent_dim) self.V_Embedding = nn.Embedding(num_items, latent_dim) self.T_Embedding = nn.Embedding(num_times, latent_dim) torch.nn.init.uniform_(self.U_Embedding.weight) torch.nn.init.uniform_(self.V_Embedding.weight) torch.nn.init.uniform_(self.T_Embedding.weight) self.rnn = nn.Sequential( CARA1(latent_dim, latent_dim, input_shape=(self.maxVenue, (self.latent_dim * 2) + 2,), unroll=True)) # latent_dim * 2 + 2 = v_embedding + t_embedding + time_gap + distance def forward(self, x): # INPUT = [self.user_input, self.time_input, self.gap_time_input, self.pos_distance_input, # self.neg_distance_input, self.checkins_input, # self.neg_checkins_input] # pass # User latent factor user_input = torch.tensor(x[0]) time_input = torch.tensor(x[1]) gap_time_input = torch.tensor(x[2], dtype=torch.float32) pos_distance_input = torch.tensor(x[3], dtype=torch.float32) neg_distance_input = torch.tensor(x[4], dtype=torch.float32) checkins_input = torch.tensor(x[5]) neg_checkins_input = torch.tensor(x[6]) self.u_latent = self.U_Embedding(user_input) self.t_latent = self.T_Embedding(time_input) h, w = gap_time_input.shape gap_time_input = gap_time_input.view(h, w, 1) rnn_input = torch.cat([self.V_Embedding(checkins_input), self.T_Embedding(time_input), gap_time_input], -1) neg_rnn_input = torch.cat([self.V_Embedding(neg_checkins_input), self.T_Embedding(time_input), gap_time_input], -1) h, w = pos_distance_input.shape pos_distance_input = pos_distance_input.view(h, w, 1) h, w = neg_distance_input.shape neg_distance_input = neg_distance_input.view(h, w, 1) rnn_input = torch.cat([rnn_input, pos_distance_input], -1) neg_rnn_input = torch.cat([neg_rnn_input, neg_distance_input], -1) self.checkins_emb = self.rnn(rnn_input) self.neg_checkins_emb = self.rnn(neg_rnn_input) pred = (self.checkins_emb * self.u_latent).sum(dim=1) neg_pred = (self.neg_checkins_emb * self.u_latent).sum(dim=1) return bpr_triplet_loss([pred, neg_pred]) def rank(self, uid, hist_venues, hist_times, hist_time_gap, hist_distances): # hist_venues = hist_venues + [candidate_venue] # hist_times = hist_times + [time] # hist_time_gap = hist_time_gap + [time_gap] # hist_distances = hist_distances + [distance] # u_latent = self.U_Embedding(torch.tensor(uid)) # v_latent = 
self.V_Embedding(torch.tensor(hist_venues)) # t_latent = self.T_Embedding(torch.tensor(hist_times)) u_latent = self.U_Embedding.weight[uid] v_latent = self.V_Embedding.weight[hist_venues.reshape(-1)].view(hist_venues.shape[0], hist_venues.shape[1], -1) t_latent = self.T_Embedding.weight[hist_times.reshape(-1)].view(hist_times.shape[0], hist_times.shape[1], -1) h, w = hist_time_gap.shape hist_time_gap = hist_time_gap.reshape(h, w, 1) h, w = hist_distances.shape hist_distances = hist_distances.reshape(h, w, 1) rnn_input = torch.cat([t_latent, torch.tensor(hist_time_gap, dtype=torch.float32)], dim=-1) rnn_input = torch.cat([rnn_input, torch.tensor(hist_distances, dtype=torch.float32)], dim=-1) rnn_input = torch.cat([v_latent, rnn_input], dim=-1) dynamic_latent = self.rnn(rnn_input) scores = torch.mul(dynamic_latent, u_latent).sum(1) # scores = np.dot(dynamic_latent, u_latent) return scores class CARA(AbstractModel): """rnn model with long-term history attention""" def __init__(self, config, data_feature): super(CARA, self).__init__(config, data_feature) self.loc_size = data_feature['loc_size'] self.tim_size = data_feature['tim_size'] self.uid_size = data_feature['uid_size'] self.poi_profile = data_feature['poi_profile'] self.id2locid = data_feature['id2locid'] self.id2loc = [] for i in range(self.loc_size - 1): self.id2loc.append(self.id2locid[str(i)]) self.id2loc.append(self.loc_size) self.id2loc = np.array(self.id2loc) self.coor = self.poi_profile['coordinates'].apply(eval) self.rec = Recommender(self.uid_size, self.loc_size, self.tim_size, 10) def get_time_interval(self, x): y = x[:, :-1] y = np.concatenate([x[:, 0, None], y], axis=1) return x - y def get_time_interval2(self, x): y = x[:-1] y = np.concatenate([x[0, None], y], axis=0) return x - y def get_pos_distance(self, x): x = np.array(x.tolist()) y = np.concatenate([x[:, 0, None, :], x[:, :-1, :]], axis=1) r = 6373.0 rx = np.radians(x) ry = np.radians(y) d = x - y a = np.sin(d[:, :, 0] / 2) ** 2 + np.cos(rx[:, :, 0]) * np.cos(ry[:, :, 0]) * np.sin(d[:, :, 1] / 2) ** 2 c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) return r * c def get_pos_distance2(self, x): x = np.array(x.tolist()) y =
np.concatenate([x[0, None, :], x[:-1, :]], axis=0)
numpy.concatenate
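For reference, a scalar sketch of the haversine distance that get_pos_distance vectorises above. Note that the original takes the coordinate difference d = x - y in degrees while its cosine terms use radians; the standard formulation below converts the difference to radians as well. The coordinates in the usage line are arbitrary test values.

import numpy as np

def haversine_km(coord1, coord2, r=6373.0):
    # Great-circle distance between two (lat, lon) pairs given in degrees.
    lat1, lon1 = np.radians(coord1)
    lat2, lon2 = np.radians(coord2)
    a = np.sin((lat2 - lat1) / 2) ** 2 + \
        np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return r * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))

print(haversine_km((52.2297, 21.0122), (52.4064, 16.9252)))  # roughly 278.5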
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import model_from_json
import numpy as np

def predict(temp_file):
    # Load the image and expand it to a single-image batch of shape (1, 224, 224, 3).
    test_image = image.load_img(temp_file, target_size=(224, 224))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    # Rebuild the model from its serialized architecture and stored weights.
    with open('Model Weights _ Json/model.json', 'r') as json_file:
        json_model = json_file.read()
    model = model_from_json(json_model)
    model.load_weights('Model Weights _ Json/model_weights.h5')
    # Return the index of the highest-scoring class.
    result = model.predict(test_image)
    return
np.argmax(result)
numpy.argmax
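The predict helper above returns a bare class index; the sketch below turns such an index back into a label. The class names and scores are invented, since the real ordering depends on how the model was trained.

import numpy as np

class_names = ['cat', 'dog', 'horse']      # hypothetical label order
result = np.array([[0.05, 0.90, 0.05]])    # fake (1, num_classes) model output
predicted_label = class_names[int(np.argmax(result))]
print(predicted_label)  # dog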
import sys, os, re import numpy as np import scipy as sp import pandas as pd import matplotlib import seaborn as sns from pylab import * from sklearn.decomposition import PCA # from sklearn.manifold import TSNE # from openTSNE import TSNE from tsnecuda import TSNE import random plt.style.use('ggplot') sns.set_palette("husl") matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 def readFasta(fastaFile): fh = open(fastaFile, 'r') for line in fh: header = "" seq = "" if line[0] == '>': header = line.rstrip()[1:] seq = fh.readline().rstrip() yield [header, seq] fh.close() def calcShannonIndex(uniqueSet): n_unique = len(uniqueSet) pool_size = np.sum([c for c in uniqueSet.values()]) avg_seq_len = np.mean([len(c) for c in uniqueSet.keys()]) H = 0.0 for unique in uniqueSet.items(): pi = float(int(unique[1])/pool_size) H += pi*np.log2(pi) print("Unique sequences: %d\tTotal sequences: %d\nShannon index: %f\tMean sequence length: %f\n" % (n_unique, pool_size, -H, avg_seq_len)) return -H def hamming_distance(s1, s2): """Return the Hamming distance between equal-length sequences""" if len(s1) != len(s2): raise ValueError("Undefined for sequences of unequal length") return sum(el1 != el2 for el1, el2 in zip(s1, s2)) def plot_entropy_over_rounds(fileList, bc_1, bc_2): '''Plots the information entropy of multiple rounds of sequences.''' for file in fileList: unique_sequences = {} print(file) pat_start = r"(" + bc_1 + ")" pat_start = re.compile(pat_start) pat_end = r"(" + bc_2 + ")" pat_end = re.compile(pat_end) for h,seq in readFasta(file): search_start = pat_start.search(seq) search_end = pat_end.search(seq) if search_start and search_end: seq_trimmed = seq[search_start.span()[1]:search_end.span()[0]] if len(seq_trimmed) == 39: if seq_trimmed in unique_sequences: unique_sequences[seq_trimmed] += 1 else: unique_sequences[seq_trimmed] = 1 negH = calcShannonIndex(unique_sequences) def plot_hamming_distance_over_rounds(fileList, bc_1, bc_2, ref): n_col = 5 n_row = 2 plt.figure(figsize=(15, 6)) for i, file in enumerate(fileList): unique_sequences = {} print(file) pat_start = r"(" + bc_1 + ")" pat_start = re.compile(pat_start) pat_end = r"(" + bc_2 + ")" pat_end = re.compile(pat_end) for h,seq in readFasta(file): search_start = pat_start.search(seq) search_end = pat_end.search(seq) if search_start and search_end: seq_trimmed = seq[search_start.span()[1]:search_end.span()[0]] if len(seq_trimmed) == len(ref): if seq_trimmed in unique_sequences: unique_sequences[seq_trimmed] += 1 else: unique_sequences[seq_trimmed] = 1 hd_list = [] hd_dist = np.zeros(len(ref)) for uniqe_seq in unique_sequences: hd = hamming_distance(uniqe_seq, ref) hd_list.append(hd) hd_dist[hd] += 1 print("Mean HD: %f" % (np.mean(hd_list))) print("HD distribution: ", hd_dist) plt.subplot(n_row, n_col, i+1) plt.title("A1 R%d" % (i)) plt.xlabel("HD from 359") plt.ylabel("Count") plt.bar(np.arange(hd_dist.shape[0]), hd_dist) plt.tight_layout() plt.show(block=True) # plt.savefig('randomer_fig4_hd_from_A1_359.pdf', dpi=300, bbox_inches='tight') def gen_rand_seqs(seq_len, n_seq): nt_arr = ['A', 'T', 'C', 'G'] rand_seqs = [] for i in range(n_seq): rand_dna = "" for i in range(seq_len): rand_dna += nt_arr[random.randint(0, 3)] rand_seqs.append(rand_dna) return rand_seqs def plot_hamming_distance_over_rounds_stacked(fileList, bc_1, bc_2, ref, labels=None, add_rand=False, write_csv=False, output_file_name="default"): if write_csv: fh = open(output_file_name + ".csv", 'w') header = "Experiment" for i in range(len(ref)): header 
+= ",%d" % (i) fh.write(header+'\n') n_col = 1 n_row = len(fileList) if add_rand: n_row += 1 plt.figure(figsize=(4, len(fileList))) c = 0 for i, file in enumerate(fileList): unique_sequences = {} print(file) pat_start = r"(" + bc_1 + ")" pat_start = re.compile(pat_start) pat_end = r"(" + bc_2 + ")" pat_end = re.compile(pat_end) for h,seq in readFasta(file): search_start = pat_start.search(seq) search_end = pat_end.search(seq) if search_start and search_end: seq_trimmed = seq[search_start.span()[1]:search_end.span()[0]] if len(seq_trimmed) == len(ref): if seq_trimmed in unique_sequences: unique_sequences[seq_trimmed] += 1 else: unique_sequences[seq_trimmed] = 1 hd_list = [] hd_dist = np.zeros(len(ref)) for uniqe_seq in unique_sequences: hd = hamming_distance(uniqe_seq, ref) hd_list.append(hd) hd_dist[hd] += 1 # if hd <= 10: # print(">%s_%d" %(file, c)) # print(uniqe_seq) c += 1 print("Mean HD: %f" % (np.mean(hd_list))) print("HD distribution: ", hd_dist) if write_csv: csv_line = "%s," % (file) csv_line += ",".join([str(x) for x in hd_dist]) fh.write(csv_line+"\n") plt.subplot(n_row, n_col, i+1) # plt.title("A1 R%d" % (i)) # plt.xlabel("HD from 359") # plt.ylabel("Count") if labels != None: plt.bar(np.arange(hd_dist.shape[0]), hd_dist, label="%s" % (labels[i])) else: plt.bar(np.arange(hd_dist.shape[0]), hd_dist, label="R%d" % (i)) plt.yticks([]) if add_rand == True: plt.xticks([0, 10, 20, 30, 40]) plt.tick_params(axis='x', labelsize=0, length = 3, labelbottom=False) elif add_rand == False: if i != len(fileList)-1: plt.xticks([0, 10, 20, 30, 40]) plt.tick_params(axis='x', labelsize=0, length = 3, labelbottom=False) else: plt.xticks([0, 10, 20, 30, 40]) plt.xlabel("HD from seqT1") plt.legend() if add_rand: ## Random sequences. print("Random") rand_seq = gen_rand_seqs(len(ref), 10000) # Generate 10k random seqs. hd_list = [] hd_dist = np.zeros(len(ref)) for seq in rand_seq: hd = hamming_distance(seq, ref) hd_list.append(hd) hd_dist[hd] += 1 print("Mean HD: %f" % (np.mean(hd_list))) print("HD distribution: ", hd_dist) if write_csv: csv_line = "Random," csv_line += ",".join([str(x) for x in hd_dist]) fh.write(csv_line+"\n") plt.subplot(n_row, n_col, n_row) plt.bar(np.arange(hd_dist.shape[0]), hd_dist, label="Random") plt.yticks([]) plt.legend() plt.xticks([0, 10, 20, 30, 40]) plt.xlabel("HD from seqT1") plt.tight_layout() # plt.show(block=True) plt.savefig(output_file_name + ".pdf", dpi=300, bbox_inches='tight') nt_arr = ['A', 'T', 'C', 'G'] def generate_random_rna(nt_len): rand_rna = "" for i in range(nt_len): rand_rna += nt_arr[random.randint(0, 3)] return rand_rna # encode_dict = { # 'A': 0, # 'T': 1, # 'C': 2, # 'G': 3, # 'N': 4 # } encode_dict = { 'A': [1,0,0], 'T': [1,1,0], 'C': [1,1,0], 'G': [0,0,1], ' ': [0,0,0] } def encode(sequence): encoded =
np.zeros((45, 3), dtype=int)
numpy.zeros
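A hand-checkable toy case of the Shannon-index computation in calcShannonIndex above: a pool with two unique sequences of counts 3 and 1 gives H = -(0.75 * log2(0.75) + 0.25 * log2(0.25)), about 0.811.

import numpy as np

unique_sequences = {'ACGT': 3, 'TTTT': 1}   # toy pool: count per unique sequence
pool_size = sum(unique_sequences.values())
H = -sum((c / pool_size) * np.log2(c / pool_size)
         for c in unique_sequences.values())
print(round(H, 3))  # 0.811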
# coding: utf-8 # Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department # Distributed under the terms of "New BSD License", see the LICENSE file. from __future__ import division, print_function import ast from copy import copy from collections import OrderedDict from math import cos, sin import numpy as np from six import string_types import warnings from ase.geometry import cellpar_to_cell, complete_cell, get_distances from matplotlib.colors import rgb2hex from scipy.interpolate import interp1d from pyiron.atomistics.structure.atom import Atom from pyiron.atomistics.structure.sparse_list import SparseArray, SparseList from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement, ElementColorDictionary from pyiron.base.settings.generic import Settings from scipy.spatial import cKDTree, Voronoi try: import spglib except ImportError: try: import pyspglib as spglib except ImportError: raise ImportError("The spglib package needs to be installed") __author__ = "<NAME>, <NAME>" __copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \ "Computational Materials Design (CM) Department" __version__ = "1.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "production" __date__ = "Sep 1, 2017" s = Settings() class Atoms(object): """ The Atoms class represents all the information required to describe a structure at the atomic scale. This class is written in such a way that is compatible with the `ASE atoms class`_. Some of the functions in this module is based on the corresponding implementation in the ASE package Args: elements (list/numpy.ndarray): List of strings containing the elements or a list of atomistics.structure.periodic_table.ChemicalElement instances numbers (list/numpy.ndarray): List of atomic numbers of elements symbols (list/numpy.ndarray): List of chemical symbols positions (list/numpy.ndarray): List of positions scaled_positions (list/numpy.ndarray): List of scaled positions (relative coordinates) pbc (list/numpy.ndarray/boolean): Tells if periodic boundary conditions should be applied on the three axes cell (list/numpy.ndarray instance): A 3x3 array representing the lattice vectors of the structure Note: Only one of elements/symbols or numbers should be assigned during initialization Attributes: indices (numpy.ndarray): A list of size N which gives the species index of the structure which has N atoms .. 
_ASE atoms class: https://wiki.fysik.dtu.dk/ase/ase/atoms.html """ def __init__(self, symbols=None, positions=None, numbers=None, tags=None, momenta=None, masses=None, magmoms=None, charges=None, scaled_positions=None, cell=None, pbc=None, celldisp=None, constraint=None, calculator=None, info=None, indices=None, elements=None, dimension=None, species=None, **qwargs): if symbols is not None: if elements is None: elements = symbols else: raise ValueError("Only elements OR symbols should be given.") if tags is not None or momenta is not None or masses is not None or charges is not None \ or celldisp is not None or constraint is not None or calculator is not None or info is not None: s.logger.debug('Not supported parameter used!') self._store_elements = dict() self._species_to_index_dict = None self.colorLut = ElementColorDictionary().to_lut() self._is_scaled = False if cell is not None: # make it ASE compatible if np.linalg.matrix_rank(cell) == 1: cell = np.eye(len(cell)) * cell else: cell = np.array(cell) self._cell = cell self._species = list() self.positions= None self._pse = PeriodicTable() self._tag_list = SparseArray() self.indices = np.array([]) self._info = dict() self.arrays = dict() self.adsorbate_info = {} self.bonds = None self._pbc = False self.dimension = 3 # Default self.units = {"length": "A", "mass": "u"} el_index_lst = list() element_list = None if (elements is None) and (numbers is None) and (indices is None): return if numbers is not None: # for ASE compatibility if not (elements is None): raise AssertionError() elements = self.numbers_to_elements(numbers) if elements is not None: el_object_list = None if isinstance(elements, str): element_list = self.convert_formula(elements) elif isinstance(elements, (list, tuple, np.ndarray)): if not all([isinstance(el, elements[0].__class__) for el in elements]): object_list = list() for el in elements: if isinstance(el, (str, np.str, np.str_)): object_list.append(self.convert_element(el)) if isinstance(el, ChemicalElement): object_list.append(el) if isinstance(el, Atom): object_list.append(el.element) if isinstance(el, (int, np.integer)): # pse = PeriodicTable() object_list.append(self._pse.element(el)) el_object_list = object_list if len(elements) == 0: element_list = elements else: if isinstance(elements[0], (list, tuple, np.ndarray)): elements = np.array(elements).flatten() if isinstance(elements[0], string_types): element_list = elements elif isinstance(elements[0], ChemicalElement): el_object_list = elements elif isinstance(elements[0], Atom): el_object_list = [el.element for el in elements] positions = [el.position for el in elements] elif elements.dtype in [int, np.integer]: el_object_list = self.numbers_to_elements(elements) else: raise ValueError('Unknown static type for element in list: ' + str(type(elements[0]))) if el_object_list is None: el_object_list = [self.convert_element(el) for el in element_list] self.set_species(list(set(el_object_list))) # species_to_index_dict = {el: i for i, el in enumerate(self.species)} el_index_lst = [self._species_to_index_dict[el] for el in el_object_list] elif indices is not None: el_index_lst = indices self.set_species(species) if scaled_positions is not None: if positions is not None: raise ValueError("either position or scaled_positions can be given") if cell is None: raise ValueError('scaled_positions can only be used with a given cell') positions = np.dot(np.array(cell).T, np.array(scaled_positions).T).T if positions is None: self.dimension = 3 if cell is not None: positions = 
np.zeros((len(el_index_lst), self.dimension)) self.indices = np.array(el_index_lst) self.positions = np.array(positions).astype(np.float) self._tag_list._length = len(positions) for key, val in qwargs.items(): print('set qwargs (ASE): ', key, val) setattr(self, key, val) if len(positions) > 0: self.dimension = len(positions[0]) else: self.dimension = 3 if dimension is not None: self.dimension = dimension if cell is not None: if pbc is None: self.pbc = True # default setting else: self.pbc = pbc self.set_initial_magnetic_moments(magmoms) @property def cell(self): """ numpy.ndarray: A size 3x3 array which gives the lattice vectors of the cell as [a1, a2, a3] """ return self._cell @cell.setter def cell(self, value): if value is None: self._cell = None else: if self._is_scaled: self.set_cell(value, scale_atoms=True) else: self.set_cell(value) @property def species(self): """ list: A list of atomistics.structure.periodic_table.ChemicalElement instances """ return self._species # @species.setter def set_species(self, value): """ Setting the species list Args: value (list): A list atomistics.structure.periodic_table.ChemicalElement instances """ if value is None: return value = list(value) self._species_to_index_dict = {el: i for i, el in enumerate(value)} self._species = value[:] self._store_elements = {el.Abbreviation: el for el in value} @property def info(self): """ dict: This dictionary is merely used to be compatible with the ASE Atoms class. """ return self._info @info.setter def info(self, val): self._info = val @property def pbc(self): """ list: A list of boolean values which gives the periodic boundary consitions along the three axes. The default value is [True, True, True] """ if not isinstance(self._pbc, np.ndarray): self.set_pbc(self._pbc) return self._pbc @pbc.setter def pbc(self, val): self._pbc = val @property def elements(self): """ numpy.ndarray: A size N list of atomistics.structure.periodic_table.ChemicalElement instances according to the ordering of the atoms in the instance """ return np.array([self.species[el] for el in self.indices]) def new_array(self, name, a, dtype=None, shape=None): """ Adding a new array to the instance. This function is for the purpose of compatibility with the ASE package Args: name (str): Name of the array a (list/numpy.ndarray): The array to be added dtype (type): Data type of the array shape (list/turple): Shape of the array """ if dtype is not None: a = np.array(a, dtype, order='C') if len(a) == 0 and shape is not None: a.shape = (-1,) + shape else: if not a.flags['C_CONTIGUOUS']: a = np.ascontiguousarray(a) else: a = a.copy() if name in self.arrays: raise RuntimeError for b in self.arrays.values(): if len(a) != len(b): raise ValueError('Array has wrong length: %d != %d.' % (len(a), len(b))) break if shape is not None and a.shape[1:] != shape: raise ValueError('Array has wrong shape %s != %s.' % (a.shape, (a.shape[0:1] + shape))) self.arrays[name] = a def get_array(self, name, copy=True): """ Get an array. This function is for the purpose of compatibility with the ASE package Args: name (str): Name of the required array copy (bool): True if a copy of the array is to be returned Returns: An array of a copy of the array """ if copy: return self.arrays[name].copy() else: return self.arrays[name] def set_array(self, name, a, dtype=None, shape=None): """ Update array. 
This function is for the purpose of compatibility with the ASE package Args: name (str): Name of the array a (list/numpy.ndarray): The array to be added dtype (type): Data type of the array shape (list/turple): Shape of the array """ b = self.arrays.get(name) if b is None: if a is not None: self.new_array(name, a, dtype, shape) else: if a is None: del self.arrays[name] else: a = np.asarray(a) if a.shape != b.shape: raise ValueError('Array has wrong shape %s != %s.' % (a.shape, b.shape)) b[:] = a def add_tag(self, *args, **qwargs): """ Add tags to the atoms object. Examples: For selective dynamics:: >>> self.add_tag(selective_dynamics=[False, False, False]) """ self._tag_list.add_tag(*args, **qwargs) # @staticmethod def numbers_to_elements(self, numbers): """ Convert atomic numbers in element objects (needed for compatibility with ASE) Args: numbers (list): List of Element Numbers (as Integers; default in ASE) Returns: list: A list of elements as needed for pyiron """ # pse = PeriodicTable() # TODO; extend to internal PSE which can contain additional elements and tags atom_number_to_element = {} for i_el in set(numbers): i_el = int(i_el) atom_number_to_element[i_el] = self._pse.element(i_el) return [atom_number_to_element[i_el] for i_el in numbers] def copy(self): """ Returns a copy of the instance Returns: pyiron.atomistics.structure.atoms.Atoms: A copy of the instance """ return self.__copy__() def to_hdf(self, hdf, group_name="structure"): """ Save the object in a HDF5 file Args: hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved group_name (str): Group name with which the object should be stored. This same name should be used to retrieve the object """ # import time with hdf.open(group_name) as hdf_structure: # time_start = time.time() hdf_structure["TYPE"] = str(type(self)) for el in self.species: if isinstance(el.tags, dict): with hdf_structure.open("new_species") as hdf_species: el.to_hdf(hdf_species) hdf_structure['species'] = [el.Abbreviation for el in self.species] hdf_structure["indices"] = self.indices with hdf_structure.open("tags") as hdf_tags: for tag in self._tag_list.keys(): tag_value = self._tag_list[tag] if isinstance(tag_value, SparseList): tag_value.to_hdf(hdf_tags, tag) hdf_structure["units"] = self.units hdf_structure["dimension"] = self.dimension if self.cell is not None: with hdf_structure.open("cell") as hdf_cell: hdf_cell["cell"] = self.cell hdf_cell["pbc"] = self.pbc # hdf_structure["coordinates"] = self.positions # "Atomic coordinates" hdf_structure["positions"] = self.positions # "Atomic coordinates" # potentials with explicit bonds (TIP3P, harmonic, etc.) if self.bonds is not None: hdf_structure["explicit_bonds"] = self.bonds # print ('time in atoms.to_hdf: ', time.time() - time_start) def from_hdf(self, hdf, group_name="structure"): """ Retrieve the object from a HDF5 file Args: hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved group_name (str): Group name from which the Atoms object is retreived. 
Returns: pyiron_atomistic.structure.atoms.Atoms: The retrieved atoms class """ if "indices" in hdf[group_name].list_nodes(): with hdf.open(group_name) as hdf_atoms: if "new_species" in hdf_atoms.list_groups(): with hdf_atoms.open("new_species") as hdf_species: self._pse.from_hdf(hdf_species) el_object_list = [self.convert_element(el, self._pse) for el in hdf_atoms["species"]] self.indices = hdf_atoms["indices"] self._tag_list._length = len(self) self.set_species(el_object_list) self.bonds = None if "explicit_bonds" in hdf_atoms.list_nodes(): # print "bonds: " self.bonds = hdf_atoms["explicit_bonds"] if "tags" in hdf_atoms.list_groups(): with hdf_atoms.open("tags") as hdf_tags: tags = hdf_tags.list_nodes() for tag in tags: # tr_dict = {'0': False, '1': True} if isinstance(hdf_tags[tag], (list, np.ndarray)): my_list = hdf_tags[tag] self._tag_list[tag] = SparseList(my_list, length=len(self)) else: my_dict = hdf_tags.get_pandas(tag).to_dict() my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])} self._tag_list[tag] = SparseList(my_dict, length=len(self)) tr_dict = {1: True, 0: False} self.dimension = hdf_atoms["dimension"] self.units = hdf_atoms["units"] self.cell = None if "cell" in hdf_atoms.list_groups(): with hdf_atoms.open("cell") as hdf_cell: self.cell = hdf_cell["cell"] self.pbc = hdf_cell["pbc"] # Backward compatibility position_tag = "positions" if position_tag not in hdf_atoms.list_nodes(): position_tag = "coordinates" if "is_absolute" in hdf_atoms.list_nodes(): if not tr_dict[hdf_atoms["is_absolute"]]: self.set_scaled_positions(hdf_atoms[position_tag]) else: self.positions = hdf_atoms[position_tag] else: self.positions = hdf_atoms[position_tag] if "bonds" in hdf_atoms.list_nodes(): self.bonds = hdf_atoms["explicit_bonds"] return self else: return self._from_hdf_old(hdf, group_name) def _from_hdf_old(self, hdf, group_name="structure"): """ This function exits merely for the purpose of backward compatibility """ with hdf.open(group_name) as hdf_atoms: self._pse = PeriodicTable() if "species" in hdf_atoms.list_groups(): with hdf_atoms.open("species") as hdf_species: self._pse.from_hdf(hdf_species) chemical_symbols = np.array(hdf_atoms["elements"], dtype=str) el_object_list = [self.convert_element(el, self._pse) for el in chemical_symbols] self.set_species(list(set(el_object_list))) self.indices = [self._species_to_index_dict[el] for el in el_object_list] self._tag_list._length = len(self) self.bonds = None if "explicit_bonds" in hdf_atoms.list_nodes(): # print "bonds: " self.bonds = hdf_atoms["explicit_bonds"] if "tags" in hdf_atoms.list_groups(): with hdf_atoms.open("tags") as hdf_tags: tags = hdf_tags.list_nodes() for tag in tags: # tr_dict = {'0': False, '1': True} if isinstance(hdf_tags[tag], (list, np.ndarray)): my_list = hdf_tags[tag] self._tag_list[tag] = SparseList(my_list, length=len(self)) else: my_dict = hdf_tags.get_pandas(tag).to_dict() my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])} self._tag_list[tag] = SparseList(my_dict, length=len(self)) self.cell = None if "cell" in hdf_atoms.list_groups(): with hdf_atoms.open("cell") as hdf_cell: self.cell = hdf_cell["cell"] self.pbc = hdf_cell["pbc"] tr_dict = {1: True, 0: False} self.dimension = hdf_atoms["dimension"] if "is_absolute" in hdf_atoms and not tr_dict[hdf_atoms["is_absolute"]]: self.positions = hdf_atoms["coordinates"] else: self.set_scaled_positions(hdf_atoms["coordinates"]) self.units = hdf_atoms["units"] if "bonds" in hdf_atoms.list_nodes(): self.bonds = 
hdf_atoms["explicit_bonds"] return self def center(self, vacuum=None, axis=(0, 1, 2)): """ Center atoms in unit cell. Adopted from ASE code (https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.center) Args: vacuum (float): If specified adjust the amount of vacuum when centering. If vacuum=10.0 there will thus be 10 Angstrom of vacuum on each side. axis (tuple/list): List or turple of integers specifying the axis along which the atoms should be centered """ # Find the orientations of the faces of the unit cell c = self.cell if c is None: c = np.identity(self.dimension) self.cell = c dirs = np.zeros_like(c) for i in range(3): dirs[i] = np.cross(c[i - 1], c[i - 2]) dirs[i] /= np.linalg.norm(dirs[i]) # normalize if np.dot(dirs[i], c[i]) < 0.0: dirs[i] *= -1 # Now, decide how much each basis vector should be made longer if isinstance(axis, int): axes = (axis,) else: axes = axis p = self.positions longer = np.zeros(3) shift = np.zeros(3) for i in axes: p0 = np.dot(p, dirs[i]).min() p1 = np.dot(p, dirs[i]).max() height = np.dot(c[i], dirs[i]) if vacuum is not None: lng = (p1 - p0 + 2 * vacuum) - height else: lng = 0.0 # Do not change unit cell size! top = lng + height - p1 shf = 0.5 * (top - p0) cosphi = np.dot(c[i], dirs[i]) / np.linalg.norm(c[i]) longer[i] = lng / cosphi shift[i] = shf / cosphi # Now, do it! translation = np.zeros(3) for i in axes: nowlen = np.sqrt(np.dot(c[i], c[i])) self.cell[i] *= 1 + longer[i] / nowlen translation += shift[i] * c[i] / nowlen self.positions += translation if self.pbc is None: self.pbc = self.dimension * [True] def set_positions(self, positions): """ Set positions. This function is for compatability with ASE Args: positions (numpy.ndarray/list): Positions in absolute coordinates """ self.positions = np.array(positions) self._tag_list._length = len(self) def get_positions(self): """ Get positions. 
This function is for compatability with ASE Returns: numpy.ndarray: Positions in absolute coordinates """ return self.positions def select_index(self, el): """ Returns the indices of a given element in the structure Args: el (str/atomistics.structures.periodic_table.ChemicalElement/list): Element for which the indices should be returned Returns: numpy.ndarray: An array of indices of the atoms of the given element """ if isinstance(el, str): return np.where(self.get_chemical_symbols()==el)[0] elif isinstance(el, ChemicalElement): return np.where([e==el for e in self.get_chemical_elements()])[0] if isinstance(el, (list, np.ndarray)): if isinstance(el[0], str): return np.where(np.isin(self.get_chemical_symbols(), el))[0] elif isinstance(el[0], ChemicalElement): return np.where([e in el for e in self.get_chemical_elements()])[0] def select_parent_index(self, el): """ Returns the indices of a given element in the structure ignoring user defined elements Args: el (str/atomistics.structures.periodic_table.ChemicalElement): Element for which the indices should be returned Returns: numpy.ndarray: An array of indices of the atoms of the given element """ parent_basis = self.get_parent_basis() return parent_basis.select_index(el) def get_tags(self): """ Returns the keys of the stored tags of the structure Returns: dict_keys: Keys of the stored tags """ return self._tag_list.keys() def get_pbc(self): """ Returns a boolean array of the periodic boundary conditions along the x, y and z axis respectively Returns: numpy.ndarray: Boolean array of length 3 """ if not isinstance(self._pbc, np.ndarray): self.set_pbc(self._pbc) return np.array(self._pbc, bool) def set_pbc(self, value): """ Sets the perioic boundary conditions on all three axis Args: value (numpy.ndarray/list): An array of bool type with length 3 """ if value is None: self._pbc = None else: if isinstance(value, np.ndarray): self._pbc = value elif value in (True, False): value = self.dimension * [value] if not (np.shape(np.array(value)) == (self.dimension,)): raise AssertionError() self._pbc = np.array(value, bool) def convert_element(self, el, pse=None): """ Convert a string or an atom instance into a ChemicalElement instance Args: el (str/atomistics.structure.atom.Atom): String or atom instance from which the element should be generated pse (atomistics.structure.periodictable.PeriodicTable): PeriodicTable instance from which the element is generated (optional) Returns: atomistics.structure.periodictable.ChemicalElement: The required chemical element """ if el in list(self._store_elements.keys()): return self._store_elements[el] if isinstance(el, string_types): # as symbol element = Atom(el, pse=pse).element elif isinstance(el, Atom): element = el.element el = el.element.Abbreviation elif isinstance(el, ChemicalElement): element = el el = el.Abbreviation else: raise ValueError('Unknown static type to specify a element') self._store_elements[el] = element if hasattr(self, 'species'): if element not in self.species: self._species.append(element) self.set_species(self._species) return element def get_chemical_formula(self): """ Returns the chemical formula of structure Returns: str: The chemical formula as a string """ species = self.get_number_species_atoms() formula = "" for string_sym, num in species.items(): if num == 1: formula += str(string_sym) else: formula += str(string_sym) + str(num) return formula def get_chemical_indices(self): """ Returns the list of chemical indices as ordered in self.species Returns: numpy.ndarray: A list of 
chemical indices """ return self.indices def get_atomic_numbers(self): """ Returns the atomic numbers of all the atoms in the structure Returns: numpy.ndarray: A list of atomic numbers """ el_lst = [el.AtomicNumber for el in self.species] return np.array([el_lst[el] for el in self.indices]) def get_chemical_symbols(self): """ Returns the chemical symbols for all the atoms in the structure Returns: numpy.ndarray: A list of chemical symbols """ el_lst = [el.Abbreviation for el in self.species] return np.array([el_lst[el] for el in self.indices]) def get_parent_symbols(self): """ Returns the chemical symbols for all the atoms in the structure even for user defined elements Returns: numpy.ndarray: A list of chemical symbols """ sp_parent_list = list() for sp in self.species: if isinstance(sp.Parent, (float, np.float, type(None))): sp_parent_list.append(sp.Abbreviation) else: sp_parent_list.append(sp.Parent) return np.array([sp_parent_list[i] for i in self.indices]) def get_parent_basis(self): """ Returns the basis with all user defined/special elements as the it's parent Returns: pyiron.atomistics.structure.atoms.Atoms: Structure without any user defined elements """ parent_basis = copy(self) new_species = np.array(parent_basis.species) for i, sp in enumerate(new_species): if not isinstance(sp.Parent, (float, np.float, type(None))): pse = PeriodicTable() new_species[i] = pse.element(sp.Parent) sym_list = [el.Abbreviation for el in new_species] if len(sym_list) != len(np.unique(sym_list)): uni, ind, inv_ind = np.unique(sym_list, return_index=True, return_inverse=True) new_species = new_species[ind].copy() parent_basis.set_species(list(new_species)) indices_copy = parent_basis.indices.copy() for i, ind_ind in enumerate(inv_ind): indices_copy[parent_basis.indices == i] = ind_ind parent_basis.indices = indices_copy return parent_basis parent_basis.set_species(list(new_species)) return parent_basis def get_chemical_elements(self): """ Returns the list of chemical element instances Returns: numpy.ndarray: A list of chemical element instances """ return self.elements def get_number_species_atoms(self): """ Returns a dictionary with the species in the structure and the corresponding count in the structure Returns: collections.OrderedDict: An ordered dictionary with the species and the corresponding count """ count = OrderedDict() # print "sorted: ", sorted(set(self.elements)) for el in sorted(set(self.get_chemical_symbols())): count[el] = 0 for el in self.get_chemical_symbols(): count[el] += 1 return count def get_species_symbols(self): """ Returns the symbols of the present species Returns: numpy.ndarray: List of the symbols of the species """ return np.array(sorted([el.Abbreviation for el in self.species])) def get_species_objects(self): """ Returns: """ el_set = self.species el_sym_lst = {el.Abbreviation: i for i, el in enumerate(el_set)} el_sorted = self.get_species_symbols() return [el_set[el_sym_lst[el]] for el in el_sorted] def get_number_of_species(self): """ Returns: """ return len(self.species) def get_number_of_degrees_of_freedom(self): """ Returns: """ return len(self) * self.dimension def get_center_of_mass(self): """ Returns: com (float): center of mass in A """ masses = self.get_masses() return np.einsum('i,ij->j', masses, self.positions)/
np.sum(masses)
numpy.sum
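The centre-of-mass expression the snippet ends on is a mass-weighted average; here is a self-contained check with a water-like toy geometry (approximate masses and coordinates, for illustration only).

import numpy as np

masses = np.array([1.0, 16.0, 1.0])            # H, O, H
positions = np.array([[0.0,  0.757, 0.587],
                      [0.0,  0.0,   0.0],
                      [0.0, -0.757, 0.587]])
# 'i,ij->j' contracts the per-atom masses against the (n_atoms, 3) positions,
# i.e. sum_i m_i * r_i; dividing by the total mass gives the centre of mass.
com = np.einsum('i,ij->j', masses, positions) / np.sum(masses)
print(com)  # [0.         0.         0.06522...]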
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Sep 16 15:18:08 2021. @author: <NAME> """ import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import rc rc('font',**{'family':'serif','serif':['DejaVu Sans']}) rc('text',usetex=True) params={'text.latex.preamble':[r'\usepackage{amsmath}',r'\usepackage{amssymb}']} plt.rcParams.update(params); from scipy import stats # --------------------------------------------------------------------------- # model_name = 'resnet18'#'resnet34'#'efficientnetb0'#'densenet121'# model_name_str = 'ResNet-18'# 'ResNet-34'#'EfficientNet-B0'#'DenseNet-121'#' splitLayer ='add_3'#'block2b_add'#'pool2_conv'#'add_1'# rowsPerPacket = 4 quant1 = 8 quant2 = 8 results_dir = 'simData' dataset = 'largeTest' output_dir = os.path.join('mc_results',dataset,model_name+'_'+splitLayer) os.makedirs(output_dir,exist_ok=True) file_str = 'GC_'+str(quant1)+'Bits_'+str(quant2)+'Bits_rpp_'+str(rowsPerPacket)+'_MC_' # --------------------------------------------------------------------------- # lossProbability = [0.3,0.2,0.1,0.01] burstLength = [1,2,3,4,5,6,7] # [1,2,3,4,5,6,7,8,9,10,11,12,13,14] # step_MC = 5 num_MC = 20 MC_index = [i for i in range(0,num_MC+step_MC,step_MC)] cloud_top1_acc = np.zeros([len(lossProbability),len(burstLength),num_MC],dtype=np.float64) cloud_top5_acc = np.zeros_like(cloud_top1_acc) full_top1_acc = np.zeros_like(cloud_top1_acc) full_top5_acc = np.zeros_like(cloud_top1_acc) caltec_top1_acc = np.zeros_like(cloud_top1_acc) caltec_top5_acc = np.zeros_like(caltec_top1_acc) altec_top1_acc = np.zeros_like(caltec_top1_acc) altec_top5_acc = np.zeros_like(altec_top1_acc) halrtc_top1_acc = np.zeros_like(caltec_top1_acc) halrtc_top5_acc = np.zeros_like(caltec_top5_acc) silrtc_top1_acc = np.zeros_like(halrtc_top1_acc) silrtc_top5_acc = np.zeros_like(silrtc_top1_acc) ns_top1_acc = np.zeros_like(caltec_top1_acc) ns_top5_acc = np.zeros_like(caltec_top5_acc) cloud_top1_mean = np.zeros([len(lossProbability),len(burstLength)],dtype=np.float64) cloud_top5_mean = np.zeros_like(cloud_top1_mean) full_top1_mean = np.zeros_like(cloud_top1_mean) full_top5_mean = np.zeros_like(cloud_top1_mean) caltec_top1_mean = np.zeros_like(cloud_top1_mean) caltec_top5_mean = np.zeros_like(cloud_top5_mean) altec_top1_mean = np.zeros_like(cloud_top1_mean) altec_top5_mean = np.zeros_like(cloud_top5_mean) halrtc_top1_mean = np.zeros_like(caltec_top1_mean) halrtc_top5_mean = np.zeros_like(caltec_top1_mean) silrtc_top1_mean = np.zeros_like(caltec_top1_mean) silrtc_top5_mean = np.zeros_like(caltec_top1_mean) ns_top1_mean = np.zeros_like(caltec_top1_mean) ns_top5_mean = np.zeros_like(caltec_top5_mean) # --------------------------------------------------------------------------- # for i_lp in range(len(lossProbability)): for i_bl in range(len(burstLength)): print(f'loss probability {lossProbability[i_lp]} burst length {burstLength[i_bl]}') df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[0])+'_'+str(MC_index[-1])+'_.csv')) full_top1_acc[i_lp,i_bl,:] = df_results['full_top1_accuracy'].to_numpy() cloud_top1_acc[i_lp,i_bl,:] = df_results['cloud_top1_accuracy'].to_numpy() full_top5_acc[i_lp,i_bl,:] = df_results['full_top5_accuracy'].to_numpy() cloud_top5_acc[i_lp,i_bl,:] = df_results['cloud_top5_accuracy'].to_numpy() full_top1_mean[i_lp,i_bl] = np.mean(full_top1_acc[i_lp,i_bl,:]) full_top5_mean[i_lp,i_bl] = 
np.mean(full_top5_acc[i_lp,i_bl,:]) cloud_top1_mean[i_lp,i_bl] = np.mean(cloud_top1_acc[i_lp,i_bl,:]) cloud_top5_mean[i_lp,i_bl] =
np.mean(cloud_top5_acc[i_lp,i_bl,:])
numpy.mean
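The per-cell np.mean calls above can also be written as a single reduction over the Monte-Carlo axis, which makes it cheap to add a standard error; the array below is random stand-in data with the same (loss probability, burst length, MC run) layout, not real results.

import numpy as np

rng = np.random.default_rng(0)
top1_acc = rng.uniform(0.6, 0.8, size=(4, 7, 20))   # fake accuracy results
top1_mean = np.mean(top1_acc, axis=-1)              # shape (4, 7)
top1_sem = np.std(top1_acc, axis=-1, ddof=1) / np.sqrt(top1_acc.shape[-1])
print(top1_mean.shape, top1_sem.shape)  # (4, 7) (4, 7)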
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """" Ernie pretrain data reader """ import collections import re import argparse import random import jieba import numpy as np from opencc import OpenCC from mindspore.mindrecord import FileWriter from src.tokenizer import convert_to_unicode, CharTokenizer from src.utils import get_file_list class ErnieDataReader: """Ernie data reader""" def __init__(self, file_list, vocab_path, short_seq_prob, masked_word_prob, max_predictions_per_seq, dupe_factor, max_seq_len=512, random_seed=1, do_lower_case=True, generate_neg_sample=False): # short_seq_prob, masked_word_prob, max_predictions_per_seq, vocab_words self.vocab = self.load_vocab(vocab_path) self.tokenizer = CharTokenizer( vocab_file=vocab_path, do_lower_case=do_lower_case) self.short_seq_prob = short_seq_prob self.masked_word_prob = masked_word_prob self.max_predictions_per_seq = max_predictions_per_seq self.dupe_factor = dupe_factor self.file_list = file_list self.max_seq_len = max_seq_len self.generate_neg_sample = generate_neg_sample self.global_rng = random.Random(random_seed) self.pad_id = self.vocab["[PAD]"] self.cls_id = self.vocab["[CLS]"] self.sep_id = self.vocab["[SEP]"] self.mask_id = self.vocab["[MASK]"] def load_vocab(self, vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() fin = open(vocab_file) for num, line in enumerate(fin): items = convert_to_unicode(line.strip()).split("\t") if len(items) > 2: break token = items[0] index = items[1] if len(items) == 2 else num token = token.strip() vocab[token] = int(index) return vocab def random_pair_neg_samples(self, pos_samples): """ randomly generate negative samples using pos_samples Args: pos_samples: list of positive samples Returns: neg_samples: list of negative samples """ np.random.shuffle(pos_samples) num_sample = len(pos_samples) neg_samples = [] miss_num = 0 def split_sent(sample, max_len, sep_id): token_ids, _, _, seg_labels, _ = sample sep_index = token_ids.index(sep_id) left_len = sep_index - 1 if left_len <= max_len: return (token_ids[1:sep_index], seg_labels[1:sep_index]) return [ token_ids[sep_index + 1:-1], seg_labels[sep_index + 1:-1] ] for i in range(num_sample): pair_index = (i + 1) % num_sample left_tokens, left_seg_labels = split_sent( pos_samples[i], (self.max_seq_len - 3) // 2, self.sep_id) right_tokens, right_seg_labels = split_sent( pos_samples[pair_index], self.max_seq_len - 3 - len(left_tokens), self.sep_id) token_seq = [self.cls_id] + left_tokens + [self.sep_id] + \ right_tokens + [self.sep_id] if len(token_seq) > self.max_seq_len: miss_num += 1 continue type_seq = [0] * (len(left_tokens) + 2) + [1] * (len(right_tokens) + 1) pos_seq = range(len(token_seq)) seg_label_seq = [-1] + left_seg_labels + [-1] + right_seg_labels + [ -1 ] assert len(token_seq) == len(type_seq) == len(pos_seq) == len(seg_label_seq), \ "[ERROR]len(src_id) == lne(sent_id) == len(pos_id) 
must be True" neg_samples.append([token_seq, type_seq, pos_seq, seg_label_seq, 0]) return neg_samples, miss_num def mixin_negative_samples(self, sample_generator, buffer=1000): """ 1. generate negative samples by randomly group sentence_1 and sentence_2 of positive samples 2. combine negative samples and positive samples Args: pos_sample_generator: a generator producing a parsed positive sample, which is a list: [token_ids, sent_ids, pos_ids, 1] Returns: sample: one sample from shuffled positive samples and negative samples """ pos_samples = [] neg_samples = [] num_total_miss = 0 pos_sample_num = 0 try: while True: while len(pos_samples) < buffer: sample = next(sample_generator) next_sentence_label = sample[-1] if next_sentence_label == 1: pos_samples.append(sample) pos_sample_num += 1 else: neg_samples.append(sample) new_neg_samples, miss_num = self.random_pair_neg_samples( pos_samples) num_total_miss += miss_num samples = pos_samples + neg_samples + new_neg_samples pos_samples = [] neg_samples = [] np.random.shuffle(samples) for sample in samples: yield sample except StopIteration: print("stopiteration: reach end of file") if len(pos_samples) == 1: yield pos_samples[0] elif not pos_samples: yield None else: new_neg_samples, miss_num = self.random_pair_neg_samples( pos_samples) num_total_miss += miss_num samples = pos_samples + neg_samples + new_neg_samples pos_samples = [] neg_samples = []
                np.random.shuffle(samples)
                for sample in samples:
                    yield sample
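# Minimal, self-contained sketch of the buffer-and-shuffle mixing pattern
# used by mixin_negative_samples above, with a toy sample format
# [token_ids, next_sentence_label]; all names here are illustrative and not
# part of the original reader.
def _toy_mixin(sample_generator, buffer=2, rng=random.Random(0)):
    pos_samples = []
    try:
        while True:
            while len(pos_samples) < buffer:
                pos_samples.append(next(sample_generator))
            # fake "negatives": re-pair each buffered positive, label it 0
            batch = pos_samples + [[s[0][::-1], 0] for s in pos_samples]
            pos_samples = []
            rng.shuffle(batch)
            for s in batch:
                yield s
    except StopIteration:
        for s in pos_samples:
            yield s


if __name__ == '__main__':
    toy = iter([[[1, 2, 3], 1], [[4, 5], 1], [[6, 7], 1]])
    for s in _toy_mixin(toy):
        print(s)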
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import random from .builder import DATASETS from .custom import CustomDataset from collections import OrderedDict from mmseg.core import eval_metrics, intersect_and_union, pre_eval_to_metrics import mmcv import numpy as np from mmcv.utils import print_log from prettytable import PrettyTable from mmseg.core import eval_metrics import os from sklearn import metrics from mmseg.utils import get_root_logger @DATASETS.register_module() class HSIGANDataset(CustomDataset): """Pascal VOC dataset. Args: split (str): Split txt file for Pascal VOC. """ CLASSES = ('noncancer', 'cancer') PALETTE = [[0, 0, 0], [255, 255, 255]] def __init__(self, split, gan_img_dir=None, gan_ann_dir=None, gan_split=None, gan_suffix=None, fake_rate=0.2, **kwargs): super(HSIGANDataset, self).__init__( img_suffix='.hdr', seg_map_suffix='.png', split=split, **kwargs) assert osp.exists(self.img_dir) and self.split is not None if gan_img_dir is not None: self.gan_img_dir = gan_img_dir self.gan_ann_dir = gan_ann_dir self.gan_split = gan_split self.gan_suffix = gan_suffix if self.data_root is not None: if not osp.isabs(self.gan_img_dir): self.gan_img_dir = osp.join(self.data_root, self.gan_img_dir) if not (self.ann_dir is None or osp.isabs(self.gan_ann_dir)): self.gan_ann_dir = osp.join(self.data_root, self.gan_ann_dir) if not (self.gan_split is None or osp.isabs(self.gan_split)): self.gan_split = osp.join(self.data_root, self.gan_split) self.gan_infos = self.load_annotations(self.gan_img_dir, self.gan_suffix, self.gan_ann_dir, self.seg_map_suffix, self.gan_split) self.len_ganx = len(self.gan_infos) self.fake_rate = fake_rate/(1 + fake_rate) self.len_gan = int(self.fake_rate * len(self.img_infos)) else: self.len_gan = 0 self.len_real = len(self.img_infos) def __len__(self): return self.len_real + self.len_gan def pre_pipeline(self, results): """Prepare results dict for pipeline.""" results['seg_fields'] = [] results['img_prefix'] = self.img_dir results['seg_prefix'] = self.ann_dir if self.custom_classes: results['label_map'] = self.label_map def pre_gan_pipeline(self, results): """Prepare results dict for pipeline.""" results['seg_fields'] = [] results['img_prefix'] = self.gan_img_dir results['seg_prefix'] = self.gan_ann_dir if self.custom_classes: results['label_map'] = self.label_map def get_ann_info(self, idx): """Get annotation by index.""" return self.img_infos[idx]['ann'] def get_gan_ann_info(self, idx): """Get annotation by index.""" return self.gan_infos[idx]['ann'] def prepare_train_img(self, idx): if idx < self.len_real: img_info = self.img_infos[idx] ann_info = self.get_ann_info(idx) results = dict(img_info=img_info, ann_info=ann_info) self.pre_pipeline(results) else: random_idx = random.randint(0, self.len_ganx-1) img_info = self.gan_infos[random_idx] ann_info = self.get_gan_ann_info(random_idx) # img_info = self.gan_infos[idx - self.len_real] # ann_info = self.get_gan_ann_info(idx - self.len_real) results = dict(img_info=img_info, ann_info=ann_info) self.pre_gan_pipeline(results) return self.pipeline(results) def evaluate(self, results, metric='mIoU', logger=None, gt_seg_maps=None, **kwargs): """Evaluate the dataset. Args: results (list[tuple[torch.Tensor]] | list[str]): per image pre_eval results or predict segmentation map for computing evaluation metric. metric (str | list[str]): Metrics to be evaluated. 'mIoU', 'mDice' and 'mFscore' are supported. 
logger (logging.Logger | None | str): Logger used for printing related information during evaluation. Default: None. gt_seg_maps (generator[ndarray]): Custom gt seg maps as input, used in ConcatDataset Returns: dict[str, float]: Default metrics. """ if isinstance(metric, str): metric = [metric] allowed_metrics = ['mIoU', 'mDice', 'mFscore'] if not set(metric).issubset(set(allowed_metrics)): raise KeyError('metric {} is not supported'.format(metric)) eval_results = {} # test a list of files if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of( results, str): if gt_seg_maps is None: gt_seg_maps = self.get_gt_seg_maps() num_classes = len(self.CLASSES) ret_metrics = eval_metrics( results, gt_seg_maps, num_classes, self.ignore_index, metric, label_map=self.label_map, reduce_zero_label=self.reduce_zero_label) # get kappa con_mat = np.zeros((2, 2)) for result, gt in zip(results, self.get_gt_seg_maps()): con_mat += metrics.confusion_matrix(gt.flatten(), result.flatten(), labels=[1, 0]) # test a list of pre_eval_results else: ret_metrics = pre_eval_to_metrics(results, metric) # get kappa con_mat = np.zeros((2, 2)) pre_eval_results = tuple(zip(*results)) total_area_intersect = sum(pre_eval_results[0]) total_area_label = sum(pre_eval_results[3]) con_mat[0][0] = total_area_intersect[0] con_mat[1][1] = total_area_intersect[1] con_mat[0][1] = total_area_label[1] - total_area_intersect[1] con_mat[1][0] = total_area_label[0] - total_area_intersect[0] # Because dataset.CLASSES is required for per-eval. if self.CLASSES is None: class_names = tuple(range(num_classes)) else: class_names = self.CLASSES # summary table ret_metrics_summary = OrderedDict({ ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2) for ret_metric, ret_metric_value in ret_metrics.items() }) # each class table ret_metrics.pop('aAcc', None) ret_metrics_class = OrderedDict({ ret_metric: np.round(ret_metric_value * 100, 2) for ret_metric, ret_metric_value in ret_metrics.items() }) ret_metrics_class.update({'Class': class_names}) ret_metrics_class.move_to_end('Class', last=False) # for logger class_table_data = PrettyTable() for key, val in ret_metrics_class.items(): class_table_data.add_column(key, val) summary_table_data = PrettyTable() for key, val in ret_metrics_summary.items(): if key == 'aAcc': summary_table_data.add_column(key, [val]) else: summary_table_data.add_column('m' + key, [val]) print_log('per class results:', logger) print_log('\n' + class_table_data.get_string(), logger=logger) print_log('Summary:', logger) print_log('\n' + summary_table_data.get_string(), logger=logger) # each metric dict for key, value in ret_metrics_summary.items(): if key == 'aAcc': eval_results[key] = value / 100.0 else: eval_results['m' + key] = value / 100.0 ret_metrics_class.pop('Class', None) for key, value in ret_metrics_class.items(): eval_results.update({ key + '.' 
+ str(name): value[idx] / 100.0 for idx, name in enumerate(class_names) }) print_log('mIoU:{:.4f}'.format(eval_results['mIoU']), logger=logger) print_log('mDice:{:.4f}'.format(eval_results['mDice']), logger=logger) print_log('mAcc:{:.4f}'.format(eval_results['mAcc']), logger=logger) print_log('aAcc:{:.4f}'.format(eval_results['aAcc']), logger=logger) print_log('kappa:{:.4f}'.format(kappa(con_mat)), logger=logger) print_log('accuracy:{:.4f}'.format(accuracy(con_mat)), logger=logger) # print_log('precision:{:.4f}'.format(precision(con_mat)), logger=logger) # print_log('sensitivity:{:.4f}'.format(sensitivity(con_mat)), logger=logger) # print_log('specificity:{:.4f}'.format(specificity(con_mat)), logger=logger) return eval_results def kappa(matrix): matrix = np.array(matrix) n =
np.sum(matrix)
    # Cohen's kappa from the prediction/reference confusion matrix
    # (standard formula; po = observed agreement, pe = chance agreement).
    po = np.trace(matrix) / n
    pe = np.dot(matrix.sum(axis=0), matrix.sum(axis=1)) / (n * n)
    return (po - pe) / (1 - pe)


def accuracy(matrix):
    matrix = np.array(matrix)
    return np.trace(matrix) / np.sum(matrix)
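# Quick check of the metric helpers above on a toy 2x2 confusion matrix
# (illustrative numbers): po = 0.85, pe = 0.5, so kappa = 0.7.
if __name__ == '__main__':
    _toy = np.array([[40, 10],
                     [5, 45]])
    print('kappa    :', kappa(_toy))     # 0.7
    print('accuracy :', accuracy(_toy))  # 0.85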
import numpy as np def F_1(): direction = { # horz, depth "forward": np.array([1, 0]), "down" : np.array([0, 1]), "up" : np.array([0, -1]), } positions = np.array([0, 0]) # read file. with open('input.txt') as f: for line in f: dir, mag = line.split() positions += direction[dir] * int(mag) print("\thorz : ", positions[0]) print("\tdepth : ", positions[1]) print("\tproduct : ", positions[0] * positions[1]) def F_2(): direction = { # horz, depth, aim "forward":
np.array([1, 0, 0]),
        "down"   : np.array([0, 0, 1]),
        "up"     : np.array([0, 0, -1]),
    }

    positions = np.array([0, 0, 0])

    # read file. Part-2 rules (standard puzzle logic): down/up adjust aim,
    # forward advances horizontally and changes depth by aim * magnitude.
    with open('input.txt') as f:
        for line in f:
            dir, mag = line.split()
            positions += direction[dir] * int(mag)
            if dir == "forward":
                positions[1] += positions[2] * int(mag)

    print("\thorz : ", positions[0])
    print("\tdepth : ", positions[1])
    print("\tproduct : ", positions[0] * positions[1])
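# Hypothetical driver; the original file does not show how the two parts
# are invoked.
if __name__ == "__main__":
    print("Part 1:")
    F_1()
    print("Part 2:")
    F_2()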
import os import random import numpy as np import cv2 import json import src.core as core from src.core.evaluation_utils_2d import two_d_iou, calc_heatmap from demos.demo_utils.bdd_demo_utils import read_bdd_format from demos.demo_utils.vis_utils import draw_box_2d, draw_ellipse_2d_corners from src.retina_net.anchor_generator.box_utils import vuhw_to_vuvu_np categories = { 0: 'car', 1: 'truck', 2: 'bus', 3: 'person', 4: 'rider', 5: 'bike', 6: 'motor', 7: 'bkgrnd'} def main(): ######################################################### # Specify Source Folders and Parameters For Frame Reader ######################################################### data_split_dir = 'val' # Specify whether the validation or inference results need to be # visualized. results_dir = 'testing' uncertainty_method = 'bayes_od_none' dataset_dir = os.path.expanduser('~/Datasets/bdd100k') image_dir = os.path.join(dataset_dir, 'images', '100k', data_split_dir) label_file_name = os.path.join( dataset_dir, 'labels', data_split_dir) + '.json' checkpoint_name = 'retinanet_bdd' checkpoint_number = '101' mean_dir = os.path.join( core.data_dir(), 'outputs', checkpoint_name, 'predictions', results_dir, 'bdd', checkpoint_number, uncertainty_method, 'mean') cov_dir = os.path.join( core.data_dir(), 'outputs', checkpoint_name, 'predictions', results_dir, 'bdd', checkpoint_number, uncertainty_method, 'cov') cat_param_dir = os.path.join(core.data_dir(), 'outputs', checkpoint_name, 'predictions', results_dir, 'bdd', checkpoint_number, uncertainty_method, 'cat_param') frames_list = os.listdir(image_dir)[0:100] index = random.randint(0, len(frames_list)) frame_id = frames_list[index] print('Showing Frame ID:' + frame_id) ############# # Read Frame ############# im_path = image_dir + '/' + frame_id image = cv2.imread(im_path) all_labels = json.load(open(label_file_name, 'r')) category_gt, boxes_2d_gt, _ = read_bdd_format( frame_id, all_labels, categories=[ 'car', 'truck', 'bus', 'person', 'rider', 'bike', 'motor']) prediction_boxes_mean = np.load(mean_dir + '/' + frame_id + '.npy') prediction_boxes_cov = np.load(cov_dir + '/' + frame_id + '.npy') prediction_boxes_cat_params = np.load( cat_param_dir + '/' + frame_id + '.npy') # Read entropy for debugging purposes transformation_mat = np.array([[1, 0, -0.5, 0], [0, 1, 0, -0.5], [1, 0, 0.5, 0], [0, 1, 0, 0.5]]) prediction_boxes_cov = np.matmul( np.matmul( transformation_mat, prediction_boxes_cov), transformation_mat.T) prediction_boxes = vuhw_to_vuvu_np(prediction_boxes_mean) category_pred =
np.zeros(prediction_boxes_cat_params.shape)
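# Small numeric check (illustrative values, not part of the original demo)
# of the linear covariance transform used above: corner coordinates are
# T @ [v, u, h, w], so the corner covariance is T @ cov @ T.T.
def _check_cov_transform():
    T = np.array([[1, 0, -0.5, 0],
                  [0, 1, 0, -0.5],
                  [1, 0, 0.5, 0],
                  [0, 1, 0, 0.5]])
    cov_vuhw = np.diag([4.0, 4.0, 1.0, 1.0])
    print(np.matmul(np.matmul(T, cov_vuhw), T.T))


if __name__ == "__main__":
    main()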
#!/usr/bin/env python u""" <NAME>' - 03/2022 TEST: Estimate and Remove the contribution of a "Linear Ramp" to the Wrapped Phase of a Differential InSAR Interferogram. usage: rm_phase_ramp.py [-h] [--par PAR] in_interf positional arguments: in_interf Input Interferogram - Absolute Path optional arguments: -h, --help show this help message and exit --par PAR, -P PAR Interferogram Parameter File NOTE: In this implementation of the algorithm, a first guess or preliminary estimate of the parameters defining the ramp must be provided by the user. These parameters include the number of phase cycles characterizing the ramp in the X and Y (columns and rows) directions of the input raster. A GRID SEARCH around the user-defined first guess is performed to obtain the best estimate of the ramp parameters. PYTHON DEPENDENCIES: argparse: Parser for command-line options, arguments and sub-commands https://docs.python.org/3/library/argparse.html numpy: The fundamental package for scientific computing with Python https://numpy.org/ matplotlib: Visualization with Python https://matplotlib.org/ tqdm: Progress Bar in Python. https://tqdm.github.io/ datetime: Basic date and time types https://docs.python.org/3/library/datetime.html#module-datetime py_gamma: GAMMA's Python integration with the py_gamma module UPDATE HISTORY: """ # - Python dependencies from __future__ import print_function import os import argparse import datetime import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt from pathlib import Path # - GAMMA's Python integration with the py_gamma module import py_gamma as pg from utils.read_keyword import read_keyword from utils.make_dir import make_dir def estimate_phase_ramp(dd_phase_complex: np.ndarray, cycle_r: int, cycle_c: int, slope_r: int = 1, slope_c: int = 1, s_radius: float = 2, s_step: float = 0.1) -> dict: """ Estimate a phase ramp from the provided input interferogram :param dd_phase_complex: interferogram phase expressed as complex array :param cycle_r: phase ramp number of cycles along rows :param cycle_c: phase ramp number of cycles along columns :param slope_r: phase ramp slope sign - rows axis :param slope_c: phase ramp slope sign - columns axis :param s_radius: grid search domain radius :param s_step: grid search step :return: Python dictionary containing the results of the grid search """ # - Generate synthetic field domain array_dim = dd_phase_complex.shape n_rows = array_dim[0] n_columns = array_dim[1] raster_mask = np.ones(array_dim) raster_mask[np.isnan(dd_phase_complex)] = 0 # - Integration Domain used to define the phase ramp xx_m, yy_m = np.meshgrid(np.arange(n_columns), np.arange(n_rows)) if s_radius > 0: if cycle_r - s_radius <= 0: n_cycle_r_vect_f = np.arange(s_step, cycle_r + s_radius + s_step, s_step) else: n_cycle_r_vect_f = np.arange(cycle_r - s_radius, cycle_r + s_radius + s_step, s_step) if cycle_c - s_radius <= 0: n_cycle_c_vect_f = np.arange(s_step, cycle_c + s_radius + s_step, s_step) else: n_cycle_c_vect_f = np.arange(cycle_c - s_radius, cycle_c + s_radius + s_step, s_step) # - Create Grid Search Domain error_array_f = np.zeros([len(n_cycle_r_vect_f), len(n_cycle_c_vect_f)]) for r_count, n_cycle_r in tqdm(enumerate(list(n_cycle_r_vect_f)), total=len(n_cycle_r_vect_f), ncols=60): for c_count, n_cycle_c in enumerate(list(n_cycle_c_vect_f)): synth_real = slope_c * (2 * np.pi / n_columns) \ * n_cycle_c * xx_m synth_imag = slope_r * (2 * np.pi / n_rows) \ * n_cycle_r * yy_m synth_phase_plane = synth_real + synth_imag synth_complex = np.exp(1j * 
synth_phase_plane) # - Compute Complex Conjugate product between the synthetic # - phase ramp and the input interferogram. dd_phase_complex_corrected \ = np.angle(dd_phase_complex * np.conj(synth_complex)) # - Compute the Mean Absolute value of the phase residuals # - > Mean Absolute Error error = np.abs(dd_phase_complex_corrected) mae = np.nansum(error) / np.nansum(raster_mask) error_array_f[r_count, c_count] = mae # - Find location of the Minimum Absolute Error Value ind_min = np.where(error_array_f == np.nanmin(error_array_f)) n_cycle_c = n_cycle_c_vect_f[ind_min[1]][0] n_cycle_r = n_cycle_r_vect_f[ind_min[0]][0] freq_r = n_cycle_r/n_rows freq_c = n_cycle_c/n_columns else: # - Integration Domain used to define the phase ramp freq_r = cycle_r/n_rows freq_c = cycle_c/n_columns return{'freq_r': freq_r, 'freq_c': freq_c, 'xx_m': xx_m, 'yy_m': yy_m} def main() -> None: # - Read the system arguments listed after the program parser = argparse.ArgumentParser( description=""" """ ) # - Positional Arguments parser.add_argument('in_interf', type=str, default=None, help='Input Interferogram - Absolute Path') # - Interferogram Parameter File parser.add_argument('--par', '-P', type=str, help='Interferogram Parameter File', default='DEM_gc_par') args = parser.parse_args() # - Output figures parameters fig_format = 'jpeg' # - Absolute Path to input interferogram interf_input_path = Path(args.in_interf) # - Extract Data Directory from input file path data_dir = interf_input_path.parent # - Create Output Directory out_dir = make_dir(data_dir, 'DERAMP') # - interf_output_path = Path(os.path.join(data_dir, interf_input_path.name + '_deramped')) # - Interferogram Coherence Mask calculated using pg.edf filter coh_mask = Path(os.path.join(data_dir, interf_input_path.name + '.filt.coh')) # - extract Reference SLC name ref_slc = interf_input_path.name.split('-')[0].replace('coco', '') # - Interferogram Parameter File par_file = os.path.join(data_dir, args.par) # - Interferogram Width interf_width = int(read_keyword(par_file, 'width')) # - Import Coherence Mask saved as Gamma Software binary image # - Note: Set type = float coh_in = pg.read_image(coh_mask, width=interf_width, dtype='float') # - Show Coherence Mask plt.figure() plt.title('Interferogram Coherence') plt.imshow(coh_in, cmap=plt.get_cmap('viridis')) plt.colorbar() plt.tight_layout() plt.savefig(os.path.join(out_dir, 'coherence_map.'+fig_format), dpi=200, format=fig_format) plt.close() # - Compute Binary Mask using grid point with Coherence > 0 coh_mask = np.zeros(coh_in.shape) coh_mask[coh_in > 0.] 
= 1 # - Show Binary Mask plt.figure() plt.title('Binary Mask') plt.imshow(coh_mask, cmap=plt.get_cmap('gray')) plt.colorbar() plt.savefig(os.path.join(out_dir, 'binary_mask.'+fig_format), dpi=200, format=fig_format) plt.close() # - Read Complex Interferogram saved as Gamma Software binary image # - Note: Set type = fcomplex interf_in = pg.read_image(interf_input_path, width=interf_width, dtype='fcomplex') # - Interferogram Size n_rows = interf_in.shape[0] n_columns = interf_in.shape[1] # - Compute wrapped interferometric phase data_in_r_phase = np.angle(interf_in) plt.figure() plt.title('Input Interferometric Phase') plt.imshow(data_in_r_phase, cmap=plt.cm.get_cmap('jet')) plt.colorbar() plt.savefig(os.path.join(out_dir, 'input_interferometric_phase.'+fig_format), dpi=200, format=fig_format) plt.close() print('# - input_interferometric_phase.'+fig_format + ' - available inside DERAMP directory.') # - Crop the interferogram over the area where the ramp is more easily # - detectable. print('\n\n# - Chose Interferogram sub region that will be used to' 'estimate the linear ramp: ') row_min = int(input('# - Row Min: ')) row_max = int(input('# - Row Max: ')) col_min = int(input('# - Column Min: ')) col_max = int(input('# - Column Max: ')) # - Cropped Interferometric Phase Map: region used to estimate the # - phase ramp. data_in_r_phase_c = data_in_r_phase[row_min: row_max, col_min: col_max] plt.figure() plt.title('Cropped Interferometric Phase Map') plt.imshow(data_in_r_phase_c, cmap=plt.cm.get_cmap('jet')) plt.colorbar() plt.savefig(os.path.join(out_dir, 'cropped_input_interferometric_phase.'+fig_format), dpi=200, format=fig_format) plt.close() print('# - cropped_input_interferometric_phase.'+fig_format + ' - available inside DERAMP directory.') # - Search Parameters print('\n\n# - Phase Ramp Removal Parameters. Provide first guess: ') n_cycles_r = int(input('# - Number of Cycles along Rows: ')) n_cycles_c = int(input('# - Number of Cycles along Columns: ')) slope_r = int(input('# - Phase Slope along Rows: ')) slope_c = int(input('# - Phase Slope along Columns: ')) s_radius = int(input('# - Search Radius - if > 0, use Grid Search: ')) # - Estimate Phase Ramp Parameters print('# - Estimating Phase Ramp Parameters.') ramp = estimate_phase_ramp(data_in_r_phase_c, n_cycles_r, n_cycles_c, slope_r=slope_r, slope_c=slope_c, s_radius=s_radius) freq_r = ramp['freq_r'] freq_c = ramp['freq_c'] xx_m = ramp['xx_m'] yy_m = ramp['yy_m'] # - Generate Synthetic Ramp covering the entire interferogram domain. 
synth_real = slope_c * 2 * np.pi * freq_c * xx_m synth_imag = slope_r * 2 * np.pi * freq_r * yy_m synth_phase_plane = synth_real + synth_imag synth_complex = np.exp(1j * synth_phase_plane) phase_ramp = np.angle(synth_complex) print(' ') print('# - Estimated Frequencies: ') print(f'# - Fr [cycles/pixel]: {freq_r}') print(f'# - Fc [cycles/pixel] : {freq_c}') print('# - Total Number of Cycles: ') print(f'# - Rows: {freq_r*n_rows}') print(f'# - Columns : {freq_c*n_columns}') # - Estimated Phase Ramp plt.figure() plt.title('Estimated Phase Ramp') plt.imshow(phase_ramp, cmap=plt.cm.get_cmap('jet')) plt.colorbar() plt.savefig(os.path.join(out_dir, 'phase_ramp.'+fig_format), dpi=200, format=fig_format) plt.close() # - Compute synthetic ramp xx_m, yy_m = np.meshgrid(np.arange(n_columns), np.arange(n_rows)) # - Generate Synthetic Ramp synth_real = slope_c * 2 * np.pi * freq_c * xx_m synth_imag = slope_r * 2 * np.pi * freq_r * yy_m synth_phase_plane = synth_real + synth_imag # - Phase Ramp in Exponential Complex Format synth_complex = np.exp(1j * synth_phase_plane) # - Remove the estimated phase ramp from the input phase field by # - computing the complex conjugate product between the input phase # - field and the estimated ramp. dd_phase_complex_corrected = interf_in *
np.conjugate(synth_complex)

    # - Show the De-Ramped Interferometric Phase
    #   (assumed closing step, mirroring the plotting pattern used above)
    plt.figure()
    plt.title('De-Ramped Interferometric Phase')
    plt.imshow(np.angle(dd_phase_complex_corrected),
               cmap=plt.cm.get_cmap('jet'))
    plt.colorbar()
    plt.savefig(os.path.join(out_dir,
                             'deramped_interferometric_phase.' + fig_format),
                dpi=200, format=fig_format)
    plt.close()
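# - Synthetic self-check sketch for estimate_phase_ramp (not part of the
#   original processing flow): build a wrapped ramp with known cycle counts
#   and verify the grid search recovers them.
def _selftest_estimate_phase_ramp():
    rows, cols = 64, 64
    xx, yy = np.meshgrid(np.arange(cols), np.arange(rows))
    phase = 2 * np.pi * ((3 * xx / cols) + (2 * yy / rows))
    ramp = estimate_phase_ramp(np.exp(1j * phase), cycle_r=2, cycle_c=3,
                               s_radius=1, s_step=0.5)
    # - total cycle counts should come out close to 2 (rows) and 3 (columns)
    print(ramp['freq_r'] * rows, ramp['freq_c'] * cols)


# - run main program
if __name__ == '__main__':
    main()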
import prona2019Mod.utils as utils import itertools as it from six import iteritems, string_types, PY2, next import numpy as np import sys def _is_single(obj): """ Check whether `obj` is a single document or an entire corpus. Returns (is_single, new) 2-tuple, where `new` yields the same sequence as `obj`. `obj` is a single document if it is an iterable of strings. It is a corpus if it is an iterable of documents. """ obj_iter = iter(obj) temp_iter = obj_iter try: peek = next(obj_iter) obj_iter = it.chain([peek], obj_iter) except StopIteration: # An empty object is a single document return True, obj if isinstance(peek, string_types): # It's a document, return the iterator return True, obj_iter if temp_iter == obj: # Checking for iterator to the object return False, obj_iter else: # If the first item isn't a string, assume obj is a corpus return False, obj ''' def _apply(corpus, chunksize=None, **kwargs): """Apply the transformation to a whole corpus and get the result as another corpus. Parameters ---------- corpus : iterable of list of (int, number) Corpus in BoW format. chunksize : int, optional If provided - more effective processing (by group of documents) will performed. kwargs Arbitrary keyword arguments. Returns ------- :class:`~gensim.interfaces.TransformedCorpus` Transformed corpus. """ return TransformedCorpus(self, corpus, chunksize, **kwargs) ''' def score_item(worda, wordb, components, scorer, phrasegrams): """score is retained from original dataset """ try: return phrasegrams[tuple(components)][1] except KeyError: return -1 def analyze_sentence(sentence, threshold, common_terms, scorer,phrasegrams): """Analyze a sentence `sentence` a token list representing the sentence to be analyzed. `threshold` the minimum score for a bigram to be taken into account `common_terms` the list of common terms, they have a special treatment `scorer` the scorer function, as given to Phrases """ s = [utils.any2utf8(w) for w in sentence] last_uncommon = None in_between = [] # adding None is a trick that helps getting an automatic happy ending # has it won't be a common_word, nor score for word in s + [None]: is_common = word in common_terms if not is_common and last_uncommon: chain = [last_uncommon] + in_between + [word] # test between last_uncommon score = score_item( worda=last_uncommon, wordb=word, components=chain, scorer=scorer, phrasegrams=phrasegrams ) if score > threshold: yield (chain, score) last_uncommon = None in_between = [] else: # release words individually for w in it.chain([last_uncommon], in_between): yield (w, None) in_between = [] last_uncommon = word elif not is_common: last_uncommon = word else: # common term if last_uncommon: # wait for uncommon resolution in_between.append(word) else: yield (word, None) def get_phrase(sentence,phrase_model): is_single, sentence = _is_single(sentence) if not is_single: # if the input is an entire corpus (rather than a single sentence), # return an iterable stream. 
sys.exit("It is not a protein sequence") delimiter = phrase_model['delimiter'] bigrams = analyze_sentence( sentence, threshold=phrase_model['threshold'], common_terms=phrase_model['common_terms'], scorer=None, phrasegrams=phrase_model['phrasegrams']) # we will use our score_item function redefinition new_s = [] for words, score in bigrams: if score is not None: words = delimiter.join(words) new_s.append(words) return [utils.to_unicode(w) for w in new_s] def split_ngrams(seq, n): """ 'AGAMQSASM' => [['AGA', 'MQS', 'ASM'], ['GAM','QSA'], ['AMQ', 'SAS']] """ all_ngrams=[] for x in range(n): all_ngrams.append(zip(*[iter(seq[x:])]*n)) str_ngrams = [] for ngrams in all_ngrams: x = [] for ngram in ngrams: x.append("".join(ngram)) str_ngrams.append(x) return str_ngrams def to_vecs(seq,phrase_model,kmer,word2vec_index): """ convert sequence to three n-length vectors e.g. 'AGAMQSASM' => [ array([ ... * 100 ], array([ ... * 100 ], array([ ... * 100 ] ] """ ngram_patterns = split_ngrams(seq, kmer) protvecs = [] for ngrams in ngram_patterns: ngram_vecs = [] if phrase_model=='none': ngramss = ngrams else: ngramss=get_phrase(get_phrase(ngrams,phrase_model),phrase_model) for ngram in ngramss: try: ngram_vecs.append(
np.array(word2vec_index[ngram]))
            except KeyError:
                # assumed handling: skip n-grams absent from the embedding
                # index (the original handler is truncated here)
                continue
        protvecs.append(sum(ngram_vecs))
    return protvecs
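# Quick demonstration of the n-gram splitting above (pure Python, no model
# files needed); matches the example in the split_ngrams docstring.
if __name__ == '__main__':
    for frame in split_ngrams('AGAMQSASM', 3):
        print(frame)  # ['AGA', 'MQS', 'ASM'] / ['GAM', 'QSA'] / ['AMQ', 'SAS']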
#!/usr/bin/env python ## # # Create a plot of predicted and actual positions by listening to # ROS topics that contain pose and covariance information. # ## import rospy import numpy as np import GPy import matplotlib.pyplot as plt from nav_msgs.msg import Odometry from geometry_msgs.msg import PoseWithCovarianceStamped # Initialize a rospy node rospy.init_node("prediction_plotter") # Number of steps in the future to record predictions for num_steps = 10 # Global variables store realtime infomation actual_positions_x = [] actual_positions_y = [] actual_position_time = [] projected_positions = {} projected_position_time = [] # Wait to get a prediction before starting to record actual pose got_projected_position = False # Parameters for timed callbacks r = 10 # recording rate of messages, in Hz rate = rospy.Rate(r) interval = 1. / r last_time = rospy.get_time() # Callback functions def real_pose_cb(data): """ Handle the actual position information from Stage """ global last_time global actual_positions_x global actual_positions_y global actual_position_time if (rospy.get_time() > last_time + interval) and got_projected_position: x = data.pose.pose.position.x y = data.pose.pose.position.y t = data.header.stamp.to_sec() actual_positions_x.append(x) actual_positions_y.append(y) actual_position_time.append(t) last_time = rospy.get_time() def projected_pose_cb(data, step_num): """ Put predicted pose data in a dictionary, index by the time at which the prediction was made """ global projected_positions global got_projected_position global projected_position_time # Indicated that we can start collecting real poses if not got_projected_position: got_projected_position = True x = data.pose.pose.position.x y = data.pose.pose.position.y t = data.header.stamp.to_sec() cov = data.pose.covariance sigma =
np.zeros((2,2))
    # The 36-element pose covariance is row-major 6x6, so the (x, y) block
    # sits at flat indices 0, 1, 6 and 7.
    sigma[0, 0] = cov[0]
    sigma[0, 1] = cov[1]
    sigma[1, 0] = cov[6]
    sigma[1, 1] = cov[7]

    # Assumed storage scheme (the original handler is truncated): keep one
    # entry per prediction time, keyed by how many steps ahead it looks.
    projected_positions.setdefault(t, {})[step_num] = (x, y, sigma)
    projected_position_time.append(t)
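# Sanity check of the row-major covariance indexing used above: reshaping a
# flat 36-vector to 6x6 and slicing the top-left 2x2 block selects exactly
# the elements at flat indices 0, 1, 6 and 7.
_flat = np.arange(36.0)
assert np.array_equal(_flat.reshape(6, 6)[:2, :2].ravel(),
                      _flat[[0, 1, 6, 7]])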
from __future__ import print_function import mir_eval import numpy as np import soundfile as sf from pystoi.stoi import stoi as source_stoi import subprocess import os from copy import deepcopy as dc import sys def calc_sdr(degraded, reference): """ batch-wise SDR calculation for one audio file. estimation: (batch, n_sample) origin: (batch, n_sample) """ if len(degraded.shape) == 1: degraded = np.expand_dims(degraded, 0) reference = np.expand_dims(reference, 0) origin_power =
np.sum(reference ** 2, 1, keepdims=True)
    # Project the degraded signal onto the reference and compare the energy
    # of the projection with the energy of the residual (standard
    # scale-invariant SDR decomposition).
    scale = np.sum(reference * degraded, 1, keepdims=True) / origin_power
    true_part = scale * reference
    res_part = degraded - true_part
    true_power = np.sum(true_part ** 2, 1)
    res_power = np.sum(res_part ** 2, 1)
    return 10. * np.log10(true_power / res_power)
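# Minimal usage sketch for calc_sdr: SDR of a reference tone against a noisy
# copy of itself (values are illustrative; with signal power 0.5 and noise
# power 0.01 the result is roughly 17 dB).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    ref = np.sin(np.linspace(0, 8 * np.pi, 16000))
    deg = ref + 0.1 * rng.randn(16000)
    print(calc_sdr(deg, ref))  # ~17 dB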
import unittest import warnings import numpy as np from numpy.testing import assert_almost_equal import openmdao.api as om from openmdao.test_suite.components.sellar_feature import SellarIDF from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials class TestEQConstraintComp(unittest.TestCase): def test_sellar_idf(self): prob = om.Problem(SellarIDF()) prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', disp=False) prob.setup() # check derivatives prob['y1'] = 100 prob['equal.rhs:y1'] = 1 prob.run_model() cpd = prob.check_partials(out_stream=None) assert_check_partials(cpd, atol=1e-5, rtol=1e-5) # check results prob.run_driver() assert_near_equal(prob['x'], 0., 1e-5) assert_near_equal(prob['z'], [1.977639, 0.], 1e-5) assert_near_equal(prob['obj_cmp.obj'], 3.18339395045, 1e-5) assert_almost_equal(prob['y1'], 3.16) assert_almost_equal(prob['d1.y1'], 3.16) assert_almost_equal(prob['y2'], 3.7552778) assert_almost_equal(prob['d2.y2'], 3.7552778) assert_almost_equal(prob['equal.y1'], 0.0) assert_almost_equal(prob['equal.y2'], 0.0) def test_create_on_init(self): prob = om.Problem() model = prob.model # find intersection of two non-parallel lines model.add_subsystem('indep', om.IndepVarComp('x', val=0.)) model.add_subsystem('f', om.ExecComp('y=3*x-3', x=0.)) model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=0.)) model.add_subsystem('equal', om.EQConstraintComp('y', val=11.)) model.connect('indep.x', 'f.x') model.connect('indep.x', 'g.x') model.connect('f.y', 'equal.lhs:y') model.connect('g.y', 'equal.rhs:y') model.add_design_var('indep.x', lower=0., upper=20.) model.add_objective('f.y') prob.setup(mode='fwd') # verify that the output variable has been initialized self.assertEqual(prob['equal.y'], 11.) # verify that the constraint has not been added self.assertFalse('equal.y' in model.get_constraints()) # manually add the constraint model.add_constraint('equal.y', equals=0.) prob.setup(mode='fwd') prob.driver = om.ScipyOptimizeDriver(disp=False) prob.run_driver() assert_almost_equal(prob['equal.y'], 0.) assert_almost_equal(prob['indep.x'], 10.) assert_almost_equal(prob['f.y'], 27.) assert_almost_equal(prob['g.y'], 27.) cpd = prob.check_partials(out_stream=None) assert_check_partials(cpd, atol=1e-5, rtol=1e-5) def test_create_on_init_add_constraint(self): prob = om.Problem() model = prob.model # find intersection of two non-parallel lines model.add_subsystem('indep', om.IndepVarComp('x', val=0.)) model.add_subsystem('f', om.ExecComp('y=3*x-3', x=0.)) model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=0.)) model.add_subsystem('equal', om.EQConstraintComp('y', add_constraint=True)) model.connect('indep.x', 'f.x') model.connect('indep.x', 'g.x') model.connect('f.y', 'equal.lhs:y') model.connect('g.y', 'equal.rhs:y') model.add_design_var('indep.x', lower=0., upper=20.) model.add_objective('f.y') prob.setup(mode='fwd') # verify that the constraint has been added as requested self.assertTrue('equal.y' in model.get_constraints()) prob.driver = om.ScipyOptimizeDriver(disp=False) prob.run_driver() assert_almost_equal(prob['equal.y'], 0.) assert_almost_equal(prob['indep.x'], 10.) assert_almost_equal(prob['f.y'], 27.) assert_almost_equal(prob['g.y'], 27.) 
cpd = prob.check_partials(out_stream=None) assert_check_partials(cpd, atol=1e-5, rtol=1e-5) def test_create_on_init_add_constraint_no_normalization(self): prob = om.Problem() model = prob.model # find intersection of two non-parallel lines model.add_subsystem('indep', om.IndepVarComp('x', val=-2.0)) model.add_subsystem('f', om.ExecComp('y=3*x-3', x=0.)) model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=0.)) model.add_subsystem('equal', om.EQConstraintComp('y', add_constraint=True, normalize=False, ref0=0, ref=100.0)) model.connect('indep.x', 'f.x') model.connect('indep.x', 'g.x') model.connect('f.y', 'equal.lhs:y') model.connect('g.y', 'equal.rhs:y') model.add_design_var('indep.x', lower=0., upper=20.) model.add_objective('f.y') prob.setup(mode='fwd') # verify that the constraint has been added as requested self.assertTrue('equal.y' in model.get_constraints()) # verify that the output is not being normalized prob.run_model() lhs = prob['f.y'] rhs = prob['g.y'] diff = lhs - rhs assert_near_equal(prob['equal.y'], diff) prob.driver = om.ScipyOptimizeDriver(disp=False) prob.run_driver() assert_almost_equal(prob['equal.y'], 0.) assert_almost_equal(prob['indep.x'], 10.) assert_almost_equal(prob['f.y'], 27.) assert_almost_equal(prob['g.y'], 27.) cpd = prob.check_partials(out_stream=None) assert_check_partials(cpd, atol=1e-5, rtol=1e-5) def test_vectorized(self): prob = om.Problem() model = prob.model n = 100 # find intersection of two non-parallel lines, vectorized model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n))) model.add_subsystem('f', om.ExecComp('y=3*x-3', x=np.ones(n), y=np.ones(n))) model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=np.ones(n), y=np.ones(n))) model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n), add_constraint=True)) model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n))) model.connect('indep.x', 'f.x') model.connect('indep.x', 'g.x') model.connect('f.y', 'equal.lhs:y') model.connect('g.y', 'equal.rhs:y') model.connect('f.y', 'obj_cmp.y') model.add_design_var('indep.x', lower=np.zeros(n), upper=20.*np.ones(n)) model.add_objective('obj_cmp.obj') prob.setup(mode='fwd') prob.driver = om.ScipyOptimizeDriver(disp=False) prob.run_driver() assert_almost_equal(prob['equal.y'], np.zeros(n)) assert_almost_equal(prob['indep.x'], np.ones(n)*10.) assert_almost_equal(prob['f.y'], np.ones(n)*27.) assert_almost_equal(prob['g.y'], np.ones(n)*27.) cpd = prob.check_partials(out_stream=None) assert_check_partials(cpd, atol=1e-5, rtol=1e-5) def test_set_shape(self): prob = om.Problem() model = prob.model n = 100 # find intersection of two non-parallel lines, vectorized model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n))) model.add_subsystem('f', om.ExecComp('y=3*x-3', x=np.ones(n), y=np.ones(n))) model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=np.ones(n), y=np.ones(n))) model.add_subsystem('equal', om.EQConstraintComp('y', shape=(n,), add_constraint=True)) model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n))) model.connect('indep.x', 'f.x') model.connect('indep.x', 'g.x') model.connect('f.y', 'equal.lhs:y') model.connect('g.y', 'equal.rhs:y') model.connect('f.y', 'obj_cmp.y') model.add_design_var('indep.x', lower=np.zeros(n), upper=20.*np.ones(n)) model.add_objective('obj_cmp.obj') prob.setup(mode='fwd') prob.driver = om.ScipyOptimizeDriver(disp=False) prob.run_driver() assert_almost_equal(prob['equal.y'], np.zeros(n)) assert_almost_equal(prob['indep.x'], np.ones(n)*10.) 
assert_almost_equal(prob['f.y'], np.ones(n)*27.) assert_almost_equal(prob['g.y'], np.ones(n)*27.) cpd = prob.check_partials(out_stream=None) assert_check_partials(cpd, atol=1e-5, rtol=1e-5) def test_vectorized_no_normalization(self): prob = om.Problem() model = prob.model n = 100 # find intersection of two non-parallel lines, vectorized model.add_subsystem('indep', om.IndepVarComp('x', val=-2.0*np.ones(n))) model.add_subsystem('f', om.ExecComp('y=3*x-3', x=np.ones(n), y=np.ones(n))) model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=np.ones(n), y=np.ones(n))) model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n), add_constraint=True, normalize=False)) model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n))) model.connect('indep.x', 'f.x') model.connect('indep.x', 'g.x') model.connect('f.y', 'equal.lhs:y') model.connect('g.y', 'equal.rhs:y') model.connect('f.y', 'obj_cmp.y') model.add_design_var('indep.x', lower=np.zeros(n), upper=20.*np.ones(n)) model.add_objective('obj_cmp.obj') prob.setup(mode='fwd') prob.driver = om.ScipyOptimizeDriver(disp=False) # verify that the output is not being normalized prob.run_model() lhs = prob['f.y'] rhs = prob['g.y'] diff = lhs - rhs assert_near_equal(prob['equal.y'], diff) prob.run_driver() assert_almost_equal(prob['equal.y'],
np.zeros(n))
        assert_almost_equal(prob['indep.x'], np.ones(n)*10.)
        assert_almost_equal(prob['f.y'], np.ones(n)*27.)
        assert_almost_equal(prob['g.y'], np.ones(n)*27.)

        cpd = prob.check_partials(out_stream=None)
        assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
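# Hypothetical entry point (the remainder of the original test module is not
# shown) so the tests above can be run directly:
if __name__ == '__main__':
    unittest.main()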
import os import numpy as np import pybullet import gym import gym.spaces import gym.utils from jbdl.envs import get_mjcf_path, get_urdf_path class XmlBasedRobot: """ Base class for mujoco .xml based agents. """ self_collision = True def __init__(self, robot_name, action_dim, obs_dim, self_collision, add_ignored_joints=False): self.parts = None self.objects = [] self.jdict = None self.ordered_joints = None self.robot_body = None self.add_ignored_joints = add_ignored_joints high =
np.ones([action_dim])
        # Action/observation spaces, following the usual pybullet_envs
        # XmlBasedRobot pattern (assumed continuation of the truncated init).
        self.action_space = gym.spaces.Box(-high, high)
        high = np.inf * np.ones([obs_dim])
        self.observation_space = gym.spaces.Box(-high, high)
        self.robot_name = robot_name
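# Quick shape check of the spaces defined above (no simulator needed; the
# robot name and dimensions are illustrative).
if __name__ == '__main__':
    robot = XmlBasedRobot('demo_robot', action_dim=3, obs_dim=7,
                          self_collision=True)
    print(robot.action_space.shape, robot.observation_space.shape)  # (3,) (7,)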
r"""
For NV experiment (Bristol, 2019), the same Hahn echo gate is applied many
times. Rather than compute the exponential to find the gate each time, the
gates are constructed once here and reused.
"""
from numpy import block, eye, zeros

# Each n-qubit inversion gate is block anti-diagonal, [[0, -I], [I, 0]],
# with identity blocks of size 2**(n - 1); building the dictionary with
# numpy.block reproduces the previously hard-coded matrices exactly.
precomputed_hahn_y_inversion_gates = {
    n: block([
        [zeros((2 ** (n - 1), 2 ** (n - 1))), -eye(2 ** (n - 1))],
        [eye(2 ** (n - 1)), zeros((2 ** (n - 1), 2 ** (n - 1)))],
    ]).astype(complex)
    for n in range(1, 7)
}
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j], [ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]]), 7:
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
# [Elided: a very large hard-coded complex matrix literal (roughly 128 x 128,
#  truncated mid-row at both ends). Entries are 0.+0.j except for a single
#  -1.+0.j per row in the upper block and a shifted identity block of 1.+0.j
#  entries below it; the visible pattern is consistent with the block matrix
#  [[0, -I], [I, 0]]. The raw dump is unreadable and is summarized here.]
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
numpy.array
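For reference, the diagonal pattern in the elided literal above is what numpy's identity constructor produces directly; a minimal sketch (the size n is hypothetical, since the literal's true dimension is not shown):

import numpy as np

n = 130  # hypothetical size chosen to match the elided rows
identity = np.eye(n, dtype=complex)  # rows of 0.+0.j with a single 1.+0.j on the diagonal
assert identity[24, 24] == 1 + 0j and identity[24, 23] == 0j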
from typing import Any, NamedTuple, Optional
import random

import click
import chess.pgn
import numpy as np
from tqdm import tqdm

from neural_caissa.board.state import State

# Map PGN result strings to training targets.
_OUTCOME = {'1/2-1/2': 0, '0-1': -1, '1-0': 1}


class CaissaData(NamedTuple):
    # TODO: Make numpy typing work: import numpy.typing as npt
    X_origin: Any
    X_move: Optional[Any]
    X_random: Optional[Any]
    Y: Any


def _generate_dataset(data_file_path, full=False, samples=None) -> CaissaData:
    X_origin, X_move, X_random, Y = [], [], [], []
    games_counter = 0
    progress_bar = tqdm(total=games_counter + 1)
    with open(data_file_path, 'r') as pgn:
        # Guard against samples=None, which would make the comparison fail.
        while samples is None or games_counter < samples:
            game = chess.pgn.read_game(pgn)
            if game is None:
                break
            result = game.headers['Result']
            if result not in _OUTCOME:
                continue
            _y = _OUTCOME.get(result)
            board = game.board()
            for move in game.mainline_moves():
                # Serialize the position *before* the move is played.
                x_origin = State(board).serialize_conv(turn=board.turn)
                if full:
                    x_move, x_random = _get_next_and_random_moves(board, move)
                    X_move.append(x_move)
                    X_random.append(x_random)
                board.push(move)
                X_origin.append(x_origin)
                Y.append(_y)
            games_counter += 1
            progress_bar.update(1)
    progress_bar.close()
    X_origin = np.array(X_origin)
    Y =
np.array(Y)
numpy.array
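The prompt above calls a helper, _get_next_and_random_moves, that is not shown in the snippet. A minimal sketch of what it plausibly does, assuming State.serialize_conv serializes a board position; the body here is an assumption for illustration, not the project's actual implementation:

# Hypothetical sketch of the undefined helper used above.
import random
import chess
from neural_caissa.board.state import State

def _get_next_and_random_moves(board, move):
    # Serialize the position after the move that was actually played...
    board_move = board.copy()
    board_move.push(move)
    x_move = State(board_move).serialize_conv(turn=board.turn)
    # ...and after a uniformly random legal alternative from the same position.
    board_random = board.copy()
    board_random.push(random.choice(list(board.legal_moves)))
    x_random = State(board_random).serialize_conv(turn=board.turn)
    return x_move, x_random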
import numpy as np
import pytest

import rdsolver as rd


def test_grid_points_1d():
    # Test standard
    correct = np.array([1, 2, 3, 4, 5]).astype(float) / 5 * 2 * np.pi
    assert np.isclose(rd.utils.grid_points_1d(5), correct).all()

    # Test standard with specified length
    correct = np.array([1, 2, 3, 4, 5]).astype(float)
    assert np.isclose(rd.utils.grid_points_1d(5, L=5), correct).all()

    # Test different starting point
    correct = np.array([1, 2, 3, 4, 5]).astype(float) / 5 * 2 * np.pi - 1.0
    assert np.isclose(rd.utils.grid_points_1d(5, x_start=-1.0), correct).all()


def test_grid_points_2d():
    # Test standard
    n = (5, 5)
    correct_x = np.array([1, 2, 3, 4, 5]) / 5 * 2 * np.pi
    correct_y = np.array([1, 2, 3, 4, 5]) / 5 * 2 * np.pi
    correct_x_grid = np.array([[1, 1, 1, 1, 1],
                               [2, 2, 2, 2, 2],
                               [3, 3, 3, 3, 3],
                               [4, 4, 4, 4, 4],
                               [5, 5, 5, 5, 5]]) / 5 * 2 * np.pi
    correct_y_grid = np.array([[1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5]]) / 5 * 2 * np.pi
    correct_xx = np.array([1]*5 + [2]*5 + [3]*5 + [4]*5 + [5]*5) / 5 * 2 * np.pi
    correct_yy = np.array([1, 2, 3, 4, 5]*5) / 5 * 2 * np.pi
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n)
    assert np.isclose(x, correct_x).all()
    assert np.isclose(y, correct_y).all()
    assert np.isclose(x_grid, correct_x_grid).all()
    assert np.isclose(y_grid, correct_y_grid).all()
    assert np.isclose(xx, correct_xx).all()
    assert np.isclose(yy, correct_yy).all()

    # Test standard with different number of grid points
    n = (5, 6)
    correct_x = np.array([1, 2, 3, 4, 5]) / 5 * 2 * np.pi
    correct_y = np.array([1, 2, 3, 4, 5, 6]) / 6 * 2 * np.pi
    correct_x_grid = np.array([[1, 1, 1, 1, 1, 1],
                               [2, 2, 2, 2, 2, 2],
                               [3, 3, 3, 3, 3, 3],
                               [4, 4, 4, 4, 4, 4],
                               [5, 5, 5, 5, 5, 5]]) / 5 * 2 * np.pi
    correct_y_grid = np.array([[1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6]]) / 6 * 2 * np.pi
    correct_xx = np.array([1]*6 + [2]*6 + [3]*6 + [4]*6 + [5]*6) / 5 * 2 * np.pi
    correct_yy = np.array([1, 2, 3, 4, 5, 6]*5) / 6 * 2 * np.pi
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n)
    assert np.isclose(x, correct_x).all()
    assert np.isclose(y, correct_y).all()
    assert np.isclose(x_grid, correct_x_grid).all()
    assert np.isclose(y_grid, correct_y_grid).all()
    assert np.isclose(xx, correct_xx).all()
    assert np.isclose(yy, correct_yy).all()

    # Test different physical lengths and different number of grid points
    n = (5, 6)
    L = (2*np.pi, 1)
    correct_x = np.array([1, 2, 3, 4, 5]) / 5 * 2 * np.pi
    correct_y = np.array([1, 2, 3, 4, 5, 6]) / 6
    correct_x_grid = np.array([[1, 1, 1, 1, 1, 1],
                               [2, 2, 2, 2, 2, 2],
                               [3, 3, 3, 3, 3, 3],
                               [4, 4, 4, 4, 4, 4],
                               [5, 5, 5, 5, 5, 5]]) / 5 * 2 * np.pi
    correct_y_grid = np.array([[1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6],
                               [1, 2, 3, 4, 5, 6]]) / 6
    correct_xx = np.array([1]*6 + [2]*6 + [3]*6 + [4]*6 + [5]*6) / 5 * 2 * np.pi
    correct_yy = np.array([1, 2, 3, 4, 5, 6]*5) / 6
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    assert np.isclose(x, correct_x).all()
    assert np.isclose(y, correct_y).all()
    assert np.isclose(x_grid, correct_x_grid).all()
    assert np.isclose(y_grid, correct_y_grid).all()
    assert np.isclose(xx, correct_xx).all()
    assert np.isclose(yy, correct_yy).all()

    # Test different physical lengths
    n = (5, 5)
    L = (2*np.pi, 1)
    correct_x = np.array([1, 2, 3, 4, 5]) / 5 * 2 * np.pi
    correct_y = np.array([1, 2, 3, 4, 5]) / 5
    correct_x_grid = np.array([[1, 1, 1, 1, 1],
                               [2, 2, 2, 2, 2],
                               [3, 3, 3, 3, 3],
                               [4, 4, 4, 4, 4],
                               [5, 5, 5, 5, 5]]) / 5 * 2 * np.pi
    correct_y_grid = np.array([[1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5]]) / 5
    correct_xx = np.array([1]*5 + [2]*5 + [3]*5 + [4]*5 + [5]*5) / 5 * 2 * np.pi
    correct_yy = np.array([1, 2, 3, 4, 5]*5) / 5
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    assert np.isclose(x, correct_x).all()
    assert np.isclose(y, correct_y).all()
    assert np.isclose(x_grid, correct_x_grid).all()
    assert np.isclose(y_grid, correct_y_grid).all()
    assert np.isclose(xx, correct_xx).all()
    assert np.isclose(yy, correct_yy).all()


def test_wave_numbers_1d():
    # 2π domain length
    correct = np.array([0, 1, 2, -3, -2, -1])
    assert (correct == rd.utils.wave_numbers_1d(6)).all()

    # Other domain lengths
    L = 1
    correct = np.array([0, 1, 2, -3, -2, -1]) * (2 * np.pi / L)
    assert (correct == rd.utils.wave_numbers_1d(6, L=L)).all()

    L = 7.89
    correct = np.array([0, 1, 2, -3, -2, -1]) * (2 * np.pi / L)
    assert (correct == rd.utils.wave_numbers_1d(6, L=L)).all()

    # Odd domains
    correct = np.array([0, 1, 2, 3, -3, -2, -1])
    assert (correct == rd.utils.wave_numbers_1d(7)).all()

    L = 1
    correct = np.array([0, 1, 2, 3, -3, -2, -1]) * (2 * np.pi / L)
    assert (correct == rd.utils.wave_numbers_1d(7, L=L)).all()

    L = 7.89
    correct = np.array([0, 1, 2, 3, -3, -2, -1]) * (2 * np.pi / L)
    assert (correct == rd.utils.wave_numbers_1d(7, L=L)).all()


def test_wave_numbers_2d():
    # 2π domain length
    correct_x = np.reshape(np.array([0, 1, 2, -3, -2, -1]*6), (6, 6), order='F')
    correct_y = np.reshape(np.array([0, 1, 2, -3, -2, -1]*6), (6, 6), order='C')
    kx, ky = rd.utils.wave_numbers_2d((6, 6))
    assert (correct_x == kx).all()
    assert (correct_y == ky).all()

    # Mixed number of grid points
    correct_x = np.reshape(np.array([0, 1, 2, -3, -2, -1]*8), (6, 8), order='F')
    correct_y = np.reshape(np.array([0, 1, 2, 3, -4, -3, -2, -1]*6), (6, 8), order='C')
    kx, ky = rd.utils.wave_numbers_2d((6, 8))
    assert (correct_x == kx).all()
    assert (correct_y == ky).all()

    # Mixed number of grid points and different lengths
    L = (3.4, 5.7)
    correct_x = np.reshape(np.array([0, 1, 2, -3, -2, -1]*8), (6, 8), order='F') * (2*np.pi / L[0])
    correct_y = np.reshape(np.array([0, 1, 2, 3, -4, -3, -2, -1]*6), (6, 8), order='C') * (2*np.pi / L[1])
    kx, ky = rd.utils.wave_numbers_2d((6, 8), L=L)
    assert (correct_x == kx).all()
    assert (correct_y == ky).all()


def test_spectral_integrate_2d():
    L = (2*np.pi, 2*np.pi)
    n = (64, 64)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    correct = 44.649967131680145266
    assert np.isclose(rd.utils.spectral_integrate_2d(f, L=L), correct)

    L = (2*np.pi, 2*np.pi)
    n = (64, 128)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    correct = 44.649967131680145266
    assert np.isclose(rd.utils.spectral_integrate_2d(f, L=L), correct)

    L = (2*np.pi, 4*np.pi)
    n = (128, 64)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    correct = 89.299934263360290533
    assert np.isclose(rd.utils.spectral_integrate_2d(f, L=L), correct)


def test_diff_multiplier_periodic_1d():
    # Error out on odd number of grid points
    with pytest.raises(RuntimeError) as excinfo:
        rd.utils.diff_multiplier_periodic_1d(65)
    excinfo.match('Must have even number of grid points.')

    # First derivative
    correct = np.array([0, 1, 2, 3, 4, 0, -4, -3, -2, -1]) * 1j
    assert (rd.utils.diff_multiplier_periodic_1d(10) == correct).all()

    # Second derivative
    correct = -np.array([0, 1, 2, 3, 4, 5, -4, -3, -2, -1])**2
    assert (rd.utils.diff_multiplier_periodic_1d(10, order=2) == correct).all()

    # Third derivative
    correct = -np.array([0, 1, 2, 3, 4, 0, -4, -3, -2, -1])**3 * 1j
    assert (rd.utils.diff_multiplier_periodic_1d(10, order=3) == correct).all()


def test_diff_multiplier_periodic_2d():
    # Error out on odd number of grid points
    with pytest.raises(RuntimeError) as excinfo:
        rd.utils.diff_multiplier_periodic_2d((65, 64))
    excinfo.match('Must have even number of grid points.')

    # First derivative
    n = (10, 10)
    correct_yy = np.array(
        [[i]*10 for i in [0, 1, 2, 3, 4, 0, -4, -3, -2, -1]]) * 1j
    correct_xx = np.array(
        [[0, 1, 2, 3, 4, 0, -4, -3, -2, -1] for _ in range(10)]) * 1j
    mult_xx, mult_yy = rd.utils.diff_multiplier_periodic_2d(n)
    assert np.isclose(mult_xx, correct_xx).all()
    assert np.isclose(mult_yy, correct_yy).all()

    # Second derivative
    n = (10, 10)
    correct_yy = -np.array(
        [[i]*10 for i in [0, 1, 2, 3, 4, 5, -4, -3, -2, -1]])**2
    correct_xx = -np.array(
        [[0, 1, 2, 3, 4, 5, -4, -3, -2, -1] for _ in range(10)])**2
    mult_xx, mult_yy = rd.utils.diff_multiplier_periodic_2d(n, order=2)
    assert np.isclose(mult_xx, correct_xx).all()
    assert np.isclose(mult_yy, correct_yy).all()


def test_diff_periodic_fft_2d():
    # Test standard grid spacing
    n = (64, 64)
    L = None
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L)
    df_dx_correct = f * np.cos(x_grid) * np.cos(y_grid)
    df_dy_correct = -f * np.sin(x_grid) * np.sin(y_grid)
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Different physical lengths of x and y
    n = (64, 64)
    L = (2*np.pi, 4*np.pi)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L)
    df_dx_correct = f * np.cos(x_grid) * np.cos(y_grid)
    df_dy_correct = -f * np.sin(x_grid) * np.sin(y_grid)
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Different number of grid points in x and y
    n = (64, 128)
    L = None
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L)
    df_dx_correct = f * np.cos(x_grid) * np.cos(y_grid)
    df_dy_correct = -f * np.sin(x_grid) * np.sin(y_grid)
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Different number of grid points in x and y and different lengths
    n = (64, 128)
    L = (4*np.pi, 2*np.pi)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L)
    df_dx_correct = f * np.cos(x_grid) * np.cos(y_grid)
    df_dy_correct = -f * np.sin(x_grid) * np.sin(y_grid)
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Test standard grid spacing, second derivative
    n = (64, 64)
    L = None
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L, order=2)
    df_dx_correct = f * np.cos(y_grid) \
        * (np.cos(x_grid)**2 * np.cos(y_grid) - np.sin(x_grid))
    df_dy_correct = f * np.sin(x_grid) \
        * (np.sin(y_grid)**2 * np.sin(x_grid) - np.cos(y_grid))
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Different physical lengths of x and y, second derivative
    n = (64, 64)
    L = (2*np.pi, 4*np.pi)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L, order=2)
    df_dx_correct = f * np.cos(y_grid) \
        * (np.cos(x_grid)**2 * np.cos(y_grid) - np.sin(x_grid))
    df_dy_correct = f * np.sin(x_grid) \
        * (np.sin(y_grid)**2 * np.sin(x_grid) - np.cos(y_grid))
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Different number of grid points in x and y, second derivative
    n = (64, 128)
    L = None
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) * np.cos(y_grid))
    df_dx, df_dy = rd.utils.diff_periodic_fft_2d(f, L=L, order=2)
    df_dx_correct = f * np.cos(y_grid) \
        * (np.cos(x_grid)**2 * np.cos(y_grid) - np.sin(x_grid))
    df_dy_correct = f * np.sin(x_grid) \
        * (np.sin(y_grid)**2 * np.sin(x_grid) - np.cos(y_grid))
    assert np.isclose(df_dx, df_dx_correct).all()
    assert np.isclose(df_dy, df_dy_correct).all()

    # Different number of grid points in x and y and diff len, second derivative
    n = (64, 128)
    L = (4*np.pi, 2*np.pi)
    x, y, xx, yy, x_grid, y_grid = rd.utils.grid_points_2d(n, L=L)
    f = np.exp(np.sin(x_grid) *
np.cos(y_grid)
numpy.cos
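The multiplier tests above encode the standard trick for spectral differentiation on a periodic grid: transform, multiply by (ik) raised to the derivative order, transform back, with the Nyquist mode zeroed for odd orders. A minimal first-derivative sketch of that idea, independent of rdsolver's actual implementation:

import numpy as np

def spectral_derivative_1d(f, L=2*np.pi):
    """First derivative of periodic samples f via FFT multipliers (sketch)."""
    n = len(f)  # assumed even, matching the tests above
    k = np.fft.fftfreq(n, d=1.0/n) * (2*np.pi / L)  # wave numbers [0, 1, ..., -1]
    k[n // 2] = 0  # zero the Nyquist mode, as in the first-derivative multiplier
    return np.real(np.fft.ifft(1j * k * np.fft.fft(f)))

x = np.arange(1, 65) / 64 * 2 * np.pi
assert np.allclose(spectral_derivative_1d(np.sin(x)), np.cos(x))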
import numpy as np import anoa.misc as misc import anoa.core.ops as ops import exceptions, copy import anoa.core.decorator as decor class _RMatmul_Const(ops.Transform): @decor.linear_transform_initialisation("const") def __init__(self, matrix): self.matrix = matrix self.matrixT = np.transpose(matrix) def forward(self, x): return np.matmul(self.matrix, x) @decor.put_child_values_arguments def adjoint(self, x): return np.matmul(self.matrixT, x) class _Matmul_Const(ops.Transform): @decor.linear_transform_initialisation("const") def __init__(self, matrix): self.matrix = matrix self.matrixT = np.transpose(matrix) def forward(self, x): return np.matmul(x, self.matrix) @decor.put_child_values_arguments def adjoint(self, x): return np.matmul(x, self.matrixT) class _Matmul_Op(ops.Transform): def forward(self, x, y): return np.matmul(x, y) def adjoint(self, x, *op_vals): child0 = op_vals[0] child1 = op_vals[1] grad0 = np.matmul(x, np.transpose(child1)) grad1 = np.matmul(np.transpose(child0), x) return [grad0, grad1] class _Mask(ops.Transform): """ ??? """ @decor.linear_transform_initialisation("unary") def __init__(self, mask): self.mask = mask def forward(self, x): return x * self.mask @decor.put_child_values_arguments def adjoint(self, x): return x * self.mask class _Sum(ops.Transform): """ ??? """ @decor.linear_transform_initialisation("unary") def __init__(self, input_shape, axis=None): self.input_shape = input_shape self.axis = axis def forward(self, x): return np.sum(x, axis=self.axis) @decor.put_child_values_arguments @decor.normalise_axis def adjoint(self, x): # expand the axis to have the same dimension as self.shape x_expand = x for axis in self.axis: x_expand = np.expand_dims(x, axis) # copy the elements into the new axis return np.broadcast_to(x_expand, self.input_shape) class _Shear(ops.Transform): """ ??? 
""" @decor.linear_transform_initialisation("unary") def __init__(self, shift_per_pixel=1, direction_axis=-1, surface_normal_axis=-2): self.shift_per_pixel = int(shift_per_pixel) self.direction_axis = direction_axis self.surface_normal_axis = surface_normal_axis self.shift_per_pixel = shift_per_pixel self.normal_shape = None self.sheared_shape = None def _assign_shape(self, normal_shape): # convert the axis to non-negative axis ndim = len(normal_shape) self.normal_shape = normal_shape self.direction_axis = misc._positive_axis(self.direction_axis, ndim) self.surface_normal_axis = misc._positive_axis(self.surface_normal_axis, ndim) # calculate the sheared shape self.sheared_shape = list(normal_shape) self.sheared_shape[self.direction_axis] = normal_shape[self.direction_axis] + (normal_shape[self.surface_normal_axis] - 1) * abs(self.shift_per_pixel) self._get_indices() def _get_indices(self): # get the input index idx_beginning = 0 if self.shift_per_pixel > 0 else self.sheared_shape[self.direction_axis]-self.normal_shape[self.direction_axis] idx_end = self.normal_shape[self.direction_axis] if self.shift_per_pixel > 0 else self.sheared_shape[self.direction_axis] self.input_index = np.index_exp[:] * (self.direction_axis) + np.index_exp[idx_beginning:idx_end] + np.index_exp[:] * (len(self.normal_shape) - self.direction_axis - 1) # roll index index = np.index_exp[:] * self.surface_normal_axis index_suffix = np.index_exp[:] * (len(self.normal_shape) - self.surface_normal_axis - 1) self.roll_axis = (self.direction_axis - 1) if self.surface_normal_axis < self.direction_axis else self.direction_axis self.roll_index = [] for i in range(self.normal_shape[self.surface_normal_axis]): self.roll_index.append(index + (i,) + index_suffix) def forward(self, x): if self.normal_shape is None or self.input_shape != self.normal_shape: self._assign_shape(self.input_shape) y = np.zeros(self.sheared_shape) # copy the input, x, to y first with zero padding in direction_axis y[self.input_index] = x # now roll the axis for i in range(self.normal_shape[self.surface_normal_axis]): # get the i-th slice of the surface_normal_axis y[self.roll_index[i]] = np.roll(y[self.roll_index[i]], i * self.shift_per_pixel, axis=self.roll_axis) return y @decor.put_child_values_arguments def adjoint(self, y): # transpose of shearing is just de-shearing (shear in the opposite direction) y_copy = np.copy(y) # roll back the axis for i in range(self.normal_shape[self.surface_normal_axis]): # get the i-th slice of the surface_normal_axis y_copy[self.roll_index[i]] = np.roll(y_copy[self.roll_index[i]], -i * self.shift_per_pixel, axis=self.roll_axis) # truncate the array x = y_copy[self.input_index] return x class _Deshear(_Shear): """ ??? 
""" def _assign_sheared_shape(self, sheared_shape): # convert the axis to non-negative axis ndim = len(sheared_shape) self.sheared_shape = sheared_shape self.direction_axis = misc._positive_axis(self.direction_axis, ndim) self.surface_normal_axis = misc._positive_axis(self.surface_normal_axis, ndim) # calculate the sheared shape self.normal_shape = list(sheared_shape) self.normal_shape[self.direction_axis] = sheared_shape[self.direction_axis] - (sheared_shape[self.surface_normal_axis] - 1) * abs(self.shift_per_pixel) self._get_indices() def forward(self, y): if self.sheared_shape is None or self.input_shape != self.sheared_shape: self._assign_sheared_shape(self.input_shape) y_copy = np.copy(y) # roll back the axis for i in range(self.normal_shape[self.surface_normal_axis]): # get the i-th slice of the surface_normal_axis y_copy[self.roll_index[i]] = np.roll(y_copy[self.roll_index[i]], -i * self.shift_per_pixel, axis=self.roll_axis) # truncate the array x = y_copy[self.input_index] return x @decor.put_child_values_arguments def adjoint(self, x): y = np.zeros(self.sheared_shape) # copy the input, x, to y first with zero padding in direction_axis y[self.input_index] = x # now roll the axis for i in range(self.normal_shape[self.surface_normal_axis]): # get the i-th slice of the surface_normal_axis y[self.roll_index[i]] = np.roll(y[self.roll_index[i]], i * self.shift_per_pixel, axis=self.roll_axis) return y class _Shift(ops.Transform): """ ??? """ @decor.linear_transform_initialisation("unary") def __init__(self, shift, axis=-1, boundary="periodic"): self.shift = shift self.axis = axis self.boundary = boundary.lower() self.list_of_boundaries = ["periodic", "symmetric", "reflect", "zeros", "same"] assert self.boundary in self.list_of_boundaries, "the boundary argument must be one of %s or a number" % self.list_of_boundaries def forward(self, x): # input checking assert self.shift < x.shape[self.axis], "the input size in axis %d (%d) must be more than the shift: %d" % (self.axis, x.shape[self.axis], self.shift) # roll the axis y = np.roll(x, self.shift, axis=self.axis) if self.boundary == "periodic": return y else: axis = misc._positive_axis(self.axis, len(x.shape)) # index of the newly shifted-in elements if self.shift >= 0: idx_begin = 0 idx_end = self.shift else: idx_begin = x.shape[axis] + self.shift idx_end = x.shape[axis] index = np.index_exp[:] * axis + np.index_exp[idx_begin:idx_end] + np.index_exp[:] * (len(x.shape) - axis - 1) if self.boundary == "zeros": y[index] = 0 return y elif self.boundary == "same": # obtain the index for the edge of the shifted elements if self.shift >= 0: idx_edge = self.shift else: idx_edge = x.shape[axis] + self.shift - 1 index_edge = np.index_exp[:] * axis + np.index_exp[idx_edge:idx_edge+1] + np.index_exp[:] * (len(x.shape) - axis - 1) # broadcast the edge's value to fill in the shifted-in elements y[index] = np.broadcast_to(y[index_edge], y[index].shape) return y elif self.boundary == "symmetric": # get the index of the input element in the shifted axis, reversed if self.shift >= 0: idx_begin = self.shift - 1 idx_end = -1 else: idx_begin = x.shape[axis] - 1 idx_end = x.shape[axis] - 1 + self.shift if idx_end == -1: index_input = np.index_exp[:] * axis + np.index_exp[idx_begin::-1] + np.index_exp[:] * (len(x.shape) - axis - 1) else: index_input = np.index_exp[:] * axis + np.index_exp[idx_begin:idx_end:-1] + np.index_exp[:] * (len(x.shape) - axis - 1) # fill in the shifted-in element with the input elements y[index] = x[index_input] return y elif 
self.boundary == "reflect": # get the index of the input element in the shifted axis, reversed if self.shift >= 0: idx_begin = self.shift idx_end = 0 else: idx_begin = x.shape[axis] - 2 idx_end = x.shape[axis] - 2 + self.shift index_input = np.index_exp[:] * axis + np.index_exp[idx_begin:idx_end:-1] + np.index_exp[:] * (len(x.shape) - axis - 1) # fill in the shifted-in element with the input elements y[index] = x[index_input] return y @decor.put_child_values_arguments def adjoint(self, x): # input checking assert self.shift < x.shape[self.axis], "the input size in axis %d must be more than the shift %d" % (self.axis, self.shift) # roll the axis y = np.roll(x, -self.shift, axis=self.axis) if self.boundary == "periodic": return y else: axis = misc._positive_axis(self.axis, len(x.shape)) # index of the newly shifted-in and shifted-out elements if self.shift >= 0: idx_begin_in = x.shape[axis] - self.shift idx_end_in = x.shape[axis] idx_begin_out = 0 idx_end_out = self.shift else: idx_begin_in = 0 idx_end_in = -self.shift idx_begin_out = x.shape[axis] + self.shift idx_end_out = x.shape[axis] index_in = np.index_exp[:] * axis + np.index_exp[idx_begin_in:idx_end_in] + np.index_exp[:] * (len(x.shape) - axis - 1) index_out = np.index_exp[:] * axis + np.index_exp[idx_begin_out:idx_end_out] + np.index_exp[:] * (len(x.shape) - axis - 1) # zeroing the newly shifted-in elements y[index_in] = 0 if self.boundary == "zeros": return y elif self.boundary == "same": # obtain the index for the edge of the shifted elements if self.shift >= 0: idx_edge = 0 else: idx_edge = x.shape[axis] - 1 index_edge = np.index_exp[:] * axis + (idx_edge,) + np.index_exp[:] * (len(x.shape) - axis - 1) y[index_edge] += np.sum(x[index_out], axis=axis) return y elif self.boundary == "symmetric": y[index_out] += np.flip(x[index_out], axis=axis) return y elif self.boundary == "reflect": # obtain the shifted-out index + 1 if self.shift >= 0: idx_begin = 1 idx_end = self.shift + 1 else: idx_begin = x.shape[axis] + self.shift - 1 idx_end = x.shape[axis] - 1 index_out2 = np.index_exp[:] * axis + np.index_exp[idx_begin:idx_end] + np.index_exp[:] * (len(x.shape) - axis - 1) y[index_out2] += np.flip(x[index_out], axis=axis) return y class _Flip(ops.Transform): """ ??? """ @decor.linear_transform_initialisation("unary") def __init__(self, axis=-1): self.axis = axis def forward(self, x): return np.flip(x, self.axis) @decor.put_child_values_arguments def adjoint(self, x): return np.flip(x, self.axis) class _Rot90(ops.Transform): """ ??? """ @decor.linear_transform_initialisation("unary") def __init__(self, k=1, axis=(0,1)): self.k = k self.axis = axis def forward(self, x): return np.rot90(x, self.k, self.axis) @decor.put_child_values_arguments def adjoint(self, x): return np.rot90(x, -self.k, self.axis) class _Transpose(ops.Transform): @decor.linear_transform_initialisation("unary") def __init__(self, axes=None): self.axes = axes def forward(self, x): return np.transpose(x, axes=self.axes) @decor.put_child_values_arguments def adjoint(self, x): if self.axes == None: return np.transpose(x) else: original_axes = np.zeros((len(self.axes),)) original_axes[np.array(self.axes)] = np.arange(len(self.axes)) original_axes = original_axes.astype(int) return np.transpose(x, original_axes) class _MaxOrMin(_Sum): # it has the same __init__ as _Sum """ ??? 
""" @decor.make_output_array @decor.normalise_axis def adjoint(self, x, *child_values): # expand the axis to have the same dimension as self.shape x_expand = x output_expand = self.value for axis in self.axis: x_expand = np.expand_dims(x, axis) output_expand =
np.expand_dims(output_expand, axis)
numpy.expand_dims
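# ---------------------------------------------------------------------------
# Note: the adjoint above follows the standard pattern for differentiating a
# max/min reduction: expand the reduced axes back with np.expand_dims so the
# incoming gradient and the stored forward output broadcast against the
# original input, then route the gradient to the maximising entries. A
# minimal, self-contained sketch of that pattern (illustrative names, not the
# library's API; ties send the gradient to every maximising entry here):
import numpy as np

def max_adjoint_sketch(grad, forward_input, axes):
    forward_output = np.max(forward_input, axis=tuple(axes))
    grad_expand = grad
    output_expand = forward_output
    for axis in sorted(axes):
        # expand cumulatively so every reduced axis is restored
        grad_expand = np.expand_dims(grad_expand, axis)
        output_expand = np.expand_dims(output_expand, axis)
    mask = (forward_input == output_expand)
    return grad_expand * mask

x = np.arange(12.).reshape(3, 4)
print(max_adjoint_sketch(np.ones(4), x, axes=[0]))  # gradient lands on row 2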
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Jun 4 10:34:22 2019 A class based formulation of other analyses. It is structured as: Dataset _| |_ | | Analysis Forecast ________________|________________ | | | SubxForecast EC45Forecast Seas5Forecast Dataset initialises the majority of the variables, handles data loading, copying and subsetting, and provides deseasonalising and data reduction methods. Analysis adds a preprocessing method for era5 data, and some additional variable setup Forecast adds an error correction method, and forecast-specific variable setup SubxForecast, EC45Forecast, and Seas5Forecast add filetype specific data processing. @author: josh """ import iris import copy as cp import datetime as dt import iris.coord_categorisation as iccat from iris.analysis.cartography import cosine_latitude_weights import numpy as np import cf_units import os class Dataset: def __init__(self,field,dates,leads=None): """ Dataset is the base class shared by all analysis and forecast data sets. It defines all functions that are generic between datasets. Not normally used directly. Args: * field - A string used to identify which fields to load from file. *date - a list or tuple of 2 datetime.datetime objects specifying the first and last datetime to include in the data *leads - used by the Forecast class only, a list or tuple of 2 floats, specifying minimum and maximum lead times in days to include. """ self.field=field self.dates=dates self._d_l,self._d_u=dates self.leads=leads #Only data of the same forecast hour is currently supported. assert dates[0].hour==dates[1].hour self.hour=[dates[0].hour] #Name of the primary time coordinate self.T="time" #The expected position of the primary time coordinate in the cube self.t=0 #The day of year associated with 'dates' self.calendar_bounds=[d.timetuple().tm_yday for d in dates] self.type=Dataset #A dictionary that can contain any number of iris CubeLists, each #labelled with a keyword. The load_data method generates a "data" and #a "clim" CubeList self.data={} #Used by the get_climatology method self.dist_means=None self.distribution=None #The time unit to use self.U=cf_units.Unit(f"Days since {cf_units.EPOCH}",\ calendar=cf_units.CALENDAR_GREGORIAN) #Constraints applied to the data at different points. 
self.constraints={ #keep only data with a valid time coordinate "load":iris.Constraint(cube_func=lambda cube: cube.coords(self.T)!=[]), #keep only data that falls within the calendar_bounds "calendar":iris.Constraint(coord_values={"day_of_year":lambda cell:\ self._in_calendar_bounds(cell)}), #keep only data for the right hour "hour":iris.Constraint(coord_values={"hour":lambda cell:\ np.isin(cell,self.hour)[0]}), #keep only data that falls within the dates "data":iris.Constraint(coord_values={self.T:lambda cell:\ self._d_l<=cell<=self._d_u}), #keep only data that falls outside the dates "clim":iris.Constraint(coord_values={self.T:lambda cell:\ (self._d_l>cell)or (cell>self._d_u)}) } self._setup() def _setup(self): """empty method used by derived classes.""" pass def set_path(self,path): """set the path from which to load data""" if os.path.isdir(path): self.path=path else: raise(ValueError("Not a valid path.")) def copy(self): """A method which returns a copy of the Dataset""" copy=self.type(self.field,self.dates,self.leads) copy.dist_means=self.dist_means copy.distribution=self.distribution copy.data=cp.deepcopy(self.data) return copy def add_constraints(self,constr_dict): """add a dictionary of constraints 'constr_dict' to the constraints attribute. Any previously defined keywords will be overwritten.""" for key in constr_dict: self.constraints[key]=constr_dict[key] def load_data(self,strict=True): """Load data from self.path as a list of iris cubes, preprocess it, and split it into two CubeLists "data" and "clim". """ CL=iris.cube.CubeList() fs=[self.path+f for f in os.listdir(self.path) if f.endswith(".nc")] for f in fs: CL.append(iris.load_cube(f,constraint=self.constraints["load"])) self.data=CL self._clean_loaded_data() a=self.data.extract(self.constraints["data"]) c=self.data.extract(self.constraints["clim"]) if strict: if a is None: raise(ValueError("No data after applying constraints.")) if c is None: raise(ValueError("No climatology data after applying constraints.")) self.data={"data":a,"clim":c} def _clean_loaded_data(self): """empty method used by derived classes.""" pass def _in_calendar_bounds(self,x): """Evaluates whether a real number x lies between the calendar_bounds of the dataset, wrapping around the end of the year if necessary.""" c0,c1=self.calendar_bounds if c1<c0: ans=(x<=c1) or (x>=c0) else: ans=(x<=c1) and (x>=c0) return ans def restrict_area(self,region): """A convenience method that restricts the spatial extent of the Dataset to one of a few preset domains, defined by a string "region". """ if region.lower()=="europe": lons=[-15,20] lats=[32,60] elif region.lower()=="france": lons=[-5,8] lats=[42,51] elif region.lower()=="north_atlantic": lons=[-80,40] lats=[30,90] else: raise(ValueError(f"Unrecognised region {region}.")) #We use this over intersection, because it works for cubelists area_constr=iris.Constraint(longitude=lambda x: lons[0]<=x<=lons[1],\ latitude=lambda x: lats[0]<=x<=lats[1]) for key in self.data: self.data[key]=self.data[key].extract(area_constr) def add_cat_coord(self,iccat_function,coordname,base_coord): """Adds a categorical coordinate to all cubes in Dataset.data, defined by 'iccat_function' relative to 'base_coord', and called 'coordname'. 
Note that the name of the new coord is defined internally by iccat_function; coordname serves only to graciously handle the case when that coordinate already exists.""" for key in self.data: for i,entry in enumerate(self.data[key]): if entry.coords(coordname)==[]: iccat_function(entry,base_coord) def change_units(self,unit_str=None,cf_unit=None): """Changes the units of all cubes in the Dataset to a new unit given either by a valid cf_units.Unit string specifier 'unit_str', or a cf_units.Unit object, 'cf_unit'.""" if unit_str is not None and cf_unit is not None: raise(ValueError("Only one unit can be provided.")) elif unit_str is not None: unit=cf_units.Unit(unit_str) elif cf_unit is not None: unit=cf_unit else: raise(ValueError("A unit must be provided.")) for key in self.data: for i,entry in enumerate(self.data[key]): entry.convert_units(unit) def change_dates(self,newdates): """ Redefines the 'dates' attribute to the list of 2 datetimes 'newdates', reapplying the "data" and "clim" constraints to match **currently quite slow for large cubelists** """ self.dates=newdates self._d_l,self._d_u=self.dates self.calendar_bounds=[d.timetuple().tm_yday for d in self.dates] CL_data=iris.cube.CubeList() CL_clim=iris.cube.CubeList() for key in self.data: a=self.data[key].extract(self.constraints["data"]) if a != []: CL_data.append(a) a=self.data[key].extract(self.constraints["clim"]) if a != []: CL_clim.append(a) CL_data=iris.cube.CubeList([c for C in CL_data for c in C]) CL_clim=iris.cube.CubeList([c for C in CL_clim for c in C]) self.data["data"]=CL_data.concatenate() self.data["clim"]=CL_clim.concatenate() def change_calendar(self,newcalendar): for key in self.data: for i,entry in enumerate(self.data[key]): newunit=cf_units.Unit(\ entry.coord("time").units.origin,calendar=newcalendar) self.data[key][i].coord("time").unit=newunit def aggregate_by(self,coords,bins,aggregator=iris.analysis.MEAN): """Aggregates the coordinates of all cubes in Dataset into user defined bins. Args: *coords - A list of strings which are the coordinates to be aggregated over. *bins - A corresponding list of lists 'bins'. bins[i] should contain the bounding values over which to group coords[i]. Kwargs: *aggregator -A valid iris.analysis.Aggregator object which specifies how to aggregate entries together. """ binlabels=[] for j,coord in enumerate(coords): binlabels.append(f"bin{j}") for key in self.data: for i,entry in enumerate(self.data[key]): for j,(coord,b) in enumerate(zip(coords,bins)): #remove potential old bins: if self.data[key][i].coords(f"bin{j}")!=[]: self.data[key][i].remove_coord(f"bin{j}") if self.data[key][i].coords(coord)==[]: raise(ValueError("No such coordinate in cube!")) label=np.digitize(entry.coord(coord).points,b) coord_dim=entry.coord_dims(entry.coord(coord)) entry.add_aux_coord(iris.coords.AuxCoord(label,\ var_name=f"bin{j}"),data_dims=coord_dim) self.data[key][i]=entry.aggregated_by(binlabels,aggregator) for j,coord in enumerate(coords): if self.data[key][i].coords(coord)!=[]: self.data[key][i].remove_coord(f"bin{j}") def collapse_over(self,coord,aggregator=iris.analysis.MEAN): """Collapses all cubes in Dataset over a single coordinate. Args: *coords - A string which is the coordinate to collapse. Kwargs: *aggregator -A valid iris.analysis.Aggregator object which specifies how to collapse the coordinate. 
""" for key in self.data: for i,entry in enumerate(self.data[key]): self.data[key][i]=self.data[key][i].collapsed(coord,aggregator) def apply_coslat_mean(self,mask=None): """Collapses the latitude and longitude coordinates of all cubes in Dataset, using a cosine latitude weighting. Kwargs: *mask: A cube with matching latitude and longitude coordinates to the cubes in Dataset. Each gridpoint in 'mask' should vary between 0 (totally masked) to 1 (totally unmasked). """ for key in self.data: for i,entry in enumerate(self.data[key]): weights = cosine_latitude_weights(entry) #include the land sea mask in the weighting if one was passed. if mask is not None: weights=weights*mask.data self.data[key][i]=entry.collapsed(["latitude","longitude"],\ iris.analysis.MEAN,weights=weights) def regrid_to(self,dataset=None,cube=None,regridder=iris.analysis.Linear()): """regrids every cube in Dataset to match either those of another Dataset object, or an iris.Cube object.""" if cube is None and dataset is None: raise(ValueError("No reference for regridding provided!")) elif cube is None: ref_cube=dataset.data["data"][0] else: ref_cube=cube for key in self.data: for i,entry in enumerate(self.data[key]): self.data[key][i]=entry.regrid(ref_cube,regridder) def apply(self,func,*args,in_place=True,keys=None,**kwargs): """A method which applies a function to every cube in Dataset Args: *func - A function of the type func(cube,*args,**kwargs). Kwargs: in_place - A boolean, specifying whether func returns an output or not. If True, cube is set equal to func(cube), unless the output is None, in which case cube is removed from the CubeList. """ if keys is None: keys=self.data for key in keys: for i,entry in enumerate(self.data[key]): result=func(entry,*args,**kwargs) if in_place: pass else: if result is not None: self.data[key][i]=result else: self.data[key].remove(self.data[key][i]) def apply_constraint(self,constraint,keys=None): """Apply a constraint to all cubes in Dataset""" if keys is None: keys=self.data for key in keys: self.data[key]=self.data[key].extract(constraint) def get_climatology(self,percentiles): """Finds the distribution of all values in the Dataset. Args: * percentiles - A numpy array ([p_1,...,p_N]) where 0<=p_i<=100, which defines the percentiles of the data distribution to calculate. """ self.percentiles=percentiles lat,lon=self.data["clim"][0].shape[-2:] dist=np.zeros([1,lat,lon]) #We call the whole cubelist into memory self.data["clim"].realise_data() dist=np.concatenate([f.data.reshape([-1,lat,lon]) for f in self.data["clim"]]) self.distribution=np.percentile(dist,percentiles,axis=0) self.distribution[0]-=0.01 means=np.zeros([len(percentiles)-1,lat,lon]) for i in range(len(percentiles)-1): for j in range(lat): for k in range(lon): means[i,j,k]=dist[np.digitize(dist[:,j,k],\ self.distribution[:,j,k],right=True)==i+1,j,k].mean() #interpolates empty bins as being halfway between the distribution bounds for i,j,k in np.argwhere(np.isnan(means)): means[i,j,k]=self.distribution[i:i+2,j,k].mean() self.dist_means=means def get_seasonal_cycle(self,N=4,period=365.25,keys=None): """Fits N sine modes to the data series, with frequencies of n/(365.25 days) for n in [1,...,N], in order to calculate a smooth seasonal cycle. Kwargs: *keys - A list of keys to self.data, specifying which data to use to calculate the cycle. If keys is None, all data in the dataset will be used. 
""" #Default is to include all data if keys is None: keys = [key for key in self.data] self.deseasonaliser=_Deseasonaliser(self.data,keys,N,period) self.deseasonaliser.fit_cycle() def remove_seasonal_cycle(self,deseasonaliser=None,strict_t_ax=False): if deseasonaliser is None: if self.deseasonaliser is None: raise(ValueError("No _Deseasonaliser object found.")) else: deseasonaliser=self.deseasonaliser if deseasonaliser.coeffs is None: deseasonaliser.fit_cycle() for key in self.data: for i,cube in enumerate(self.data[key]): cycle=deseasonaliser.evaluate_cycle(cube.coord("time"),strict=strict_t_ax) if cycle.shape!=cube.shape: dim_map=[cube.coord_dims(coord)[0] for coord in \ ["time","latitude","longitude"]] cycle=iris.util.broadcast_to_shape(cycle,cube.shape,dim_map) self.data[key][i].data=cube.data-cycle def set_time_axis_first(self,tname="time"): for key in self.data: for entry in self.data[key]: t_ax=entry.coord_dims(tname)[0] if t_ax!=0: ax=np.arange(entry.ndim) entry.transpose([t_ax,*ax[ax!=t_ax]]) class _Deseasonaliser: def __init__(self,data,keys,N,period=365.25,coeffs=None): self.raw_data=[] self.t=[] self.t_unit=None self.tref=None self.keys=keys self.N=N self.pnum=2*(N+1) self.period=period self.coeffs=None for key in keys: for cube in data[key]: self.raw_data.append(cube.data) if self.t_unit is not None: if self.t_unit!=cube.coord("time").units: raise(ValueError("Clashing time units in data.")) else: self.t_unit=cube.coord("time").units self.t.append(cube.coord("time").points) i=cube.coord_dims("time")[0] self.raw_data=
np.concatenate(self.raw_data,axis=i)
numpy.concatenate
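# ---------------------------------------------------------------------------
# For reference, the least-squares harmonic regression that fit_cycle
# presumably performs can be sketched in a few lines: regress the series onto
# a constant plus sine/cosine pairs at frequencies n/period for n=1..N. The
# exact parameterisation in _Deseasonaliser (e.g. its pnum=2*(N+1)
# coefficients) may differ; this is an assumption, not the class's own code.
import numpy as np

def fit_seasonal_cycle(t, y, N=4, period=365.25):
    # design matrix: [1, cos(2*pi*n*t/period), sin(2*pi*n*t/period), ...]
    cols = [np.ones_like(t)]
    for n in range(1, N + 1):
        w = 2.0 * np.pi * n / period
        cols += [np.cos(w * t), np.sin(w * t)]
    A = np.column_stack(cols)
    coeffs = np.linalg.lstsq(A, y, rcond=None)[0]
    return coeffs, A @ coeffs  # coefficients and the smooth cycle at t

t = np.arange(0.0, 3 * 365.25)
y = 10 + 5 * np.sin(2 * np.pi * t / 365.25) + np.random.normal(0, 1, t.size)
coeffs, cycle = fit_seasonal_cycle(t, y)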
from functools import partial from warnings import warn import numpy as np from numpy.polynomial.legendre import leggauss from scipy.special import erf, beta as beta_fn, gammaln from scipy.linalg import solve_triangular from numba import njit from .sys_utilities import hash_array def sub2ind(sizes, multi_index): r""" Map a d-dimensional index to the scalar index of the equivalent flat 1D array Examples -------- .. math:: \begin{bmatrix} 0,0 & 0,1 & 0,2\\ 1,0 & 1,1 & 1,2\\ 2,0 & 2,1 & 2,2 \end{bmatrix} \rightarrow \begin{bmatrix} 0 & 3 & 6\\ 1 & 4 & 7\\ 2 & 5 & 8 \end{bmatrix} >>> from pyapprox.utilities import sub2ind >>> sizes = [3,3] >>> ind = sub2ind(sizes,[1,0]) >>> print(ind) 1 Parameters ---------- sizes : integer The number of elems in each dimension. For a 2D index sizes = [numRows, numCols] multi_index : np.ndarray (len(sizes)) The d-dimensional index Returns ------- scalar_index : integer The scalar index See Also -------- pyapprox.utilities.sub2ind """ num_sets = len(sizes) scalar_index = 0 shift = 1 for ii in range(num_sets): scalar_index += shift * multi_index[ii] shift *= sizes[ii] return scalar_index def ind2sub(sizes, scalar_index, num_elems): r""" Map a scalar index of a flat 1D array to the equivalent d-dimensional index Examples -------- .. math:: \begin{bmatrix} 0 & 3 & 6\\ 1 & 4 & 7\\ 2 & 5 & 8 \end{bmatrix} \rightarrow \begin{bmatrix} 0,0 & 0,1 & 0,2\\ 1,0 & 1,1 & 1,2\\ 2,0 & 2,1 & 2,2 \end{bmatrix} >>> from pyapprox.utilities import ind2sub >>> sizes = [3,3] >>> sub = ind2sub(sizes,1,9) >>> print(sub) [1 0] Parameters ---------- sizes : integer The number of elems in each dimension. For a 2D index sizes = [numRows, numCols] scalar_index : integer The scalar index num_elems : integer The total number of elements in the d-dimensional matrix Returns ------- multi_index : np.ndarray (len(sizes)) The d-dimensional index See Also -------- pyapprox.utilities.sub2ind """ denom = num_elems num_sets = len(sizes) multi_index = np.empty((num_sets), dtype=int) for ii in range(num_sets-1, -1, -1): denom /= sizes[ii] multi_index[ii] = scalar_index / denom scalar_index = scalar_index % denom return multi_index def cartesian_product(input_sets, elem_size=1): r""" Compute the cartesian product of an arbitray number of sets. The sets can consist of numbers or themselves be lists or vectors. All the lists or vectors of a given set must have the same number of entries (elem_size). However each set can have a different number of scalars, lists, or vectors. Parameters ---------- input_sets The sets to be used in the cartesian product. elem_size : integer The size of the vectors within each set. Returns ------- result : np.ndarray (num_sets*elem_size, num_elems) The cartesian product. num_elems = np.prod(sizes)/elem_size, where sizes[ii] = len(input_sets[ii]), ii=0,..,num_sets-1. 
result.dtype will be set to the first entry of the first input_set """ import itertools out = [] # ::-1 reverse order to be backwards compatiable with old # function below for r in itertools.product(*input_sets[::-1]): out.append(r) out = np.asarray(out).T[::-1, :] return out try: from pyapprox.cython.utilities import cartesian_product_pyx # # fused type does not work for np.in32, np.float32, np.int64 # # so envoke cython cast # if np.issubdtype(input_sets[0][0],np.signedinteger): # return cartesian_product_pyx(input_sets,1,elem_size) # if np.issubdtype(input_sets[0][0],np.floating): # return cartesian_product_pyx(input_sets,1.,elem_size) # else: # return cartesian_product_pyx( # input_sets,input_sets[0][0],elem_size) # always convert to float then cast back cast_input_sets = [np.asarray(s, dtype=float) for s in input_sets] out = cartesian_product_pyx(cast_input_sets, 1., elem_size) out = np.asarray(out, dtype=input_sets[0].dtype) return out except: print('cartesian_product extension failed') num_elems = 1 num_sets = len(input_sets) sizes = np.empty((num_sets), dtype=int) for ii in range(num_sets): sizes[ii] = input_sets[ii].shape[0]/elem_size num_elems *= sizes[ii] # try: # from pyapprox.weave import c_cartesian_product # # note c_cartesian_product takes_num_elems as last arg and cython # # takes elem_size # return c_cartesian_product(input_sets, elem_size, sizes, num_elems) # except: # print ('cartesian_product extension failed') result = np.empty( (num_sets*elem_size, num_elems), dtype=type(input_sets[0][0])) for ii in range(num_elems): multi_index = ind2sub(sizes, ii, num_elems) for jj in range(num_sets): for kk in range(elem_size): result[jj*elem_size+kk, ii] =\ input_sets[jj][multi_index[jj]*elem_size+kk] return result def outer_product(input_sets): r""" Construct the outer product of an arbitary number of sets. Examples -------- .. math:: \{1,2\}\times\{3,4\}=\{1\times3, 2\times3, 1\times4, 2\times4\} = \{3, 6, 4, 8\} Parameters ---------- input_sets The sets to be used in the outer product Returns ------- result : np.ndarray(np.prod(sizes)) The outer product of the sets. result.dtype will be set to the first entry of the first input_set """ out = cartesian_product(input_sets) return np.prod(out, axis=0) try: from pyapprox.cython.utilities import outer_product_pyx # fused type does not work for np.in32, np.float32, np.int64 # so envoke cython cast if np.issubdtype(input_sets[0][0], np.signedinteger): return outer_product_pyx(input_sets, 1) if np.issubdtype(input_sets[0][0], np.floating): return outer_product_pyx(input_sets, 1.) 
else: return outer_product_pyx(input_sets, input_sets[0][0]) except ImportError: print('outer_product extension failed') num_elems = 1 num_sets = len(input_sets) sizes = np.empty((num_sets), dtype=int) for ii in range(num_sets): sizes[ii] = len(input_sets[ii]) num_elems *= sizes[ii] # try: # from pyapprox.weave import c_outer_product # return c_outer_product(input_sets) # except: # print ('outer_product extension failed') result = np.empty((num_elems), dtype=type(input_sets[0][0])) for ii in range(num_elems): result[ii] = 1.0 multi_index = ind2sub(sizes, ii, num_elems) for jj in range(num_sets): result[ii] *= input_sets[jj][multi_index[jj]] return result def unique_matrix_rows(matrix): unique_rows = [] unique_rows_set = set() for ii in range(matrix.shape[0]): key = hash_array(matrix[ii, :]) if key not in unique_rows_set: unique_rows_set.add(key) unique_rows.append(matrix[ii, :]) return np.asarray(unique_rows) def remove_common_rows(matrices): num_cols = matrices[0].shape[1] unique_rows_dict = dict() for ii in range(len(matrices)): matrix = matrices[ii] assert matrix.shape[1] == num_cols for jj in range(matrix.shape[0]): key = hash_array(matrix[jj, :]) if key not in unique_rows_dict: unique_rows_dict[key] = (ii, jj) elif unique_rows_dict[key][0] != ii: del unique_rows_dict[key] # else: # entry is a duplicate entry in the current. Allow this to # occur but only add one of the duplicates to the unique rows dict unique_rows = [] for key in list(unique_rows_dict.keys()): ii, jj = unique_rows_dict[key] unique_rows.append(matrices[ii][jj, :]) return np.asarray(unique_rows) def allclose_unsorted_matrix_rows(matrix1, matrix2): if matrix1.shape != matrix2.shape: return False matrix1_dict = dict() for ii in range(matrix1.shape[0]): key = hash_array(matrix1[ii, :]) # allow duplicates of rows if key not in matrix1_dict: matrix1_dict[key] = 0 else: matrix1_dict[key] += 1 matrix2_dict = dict() for ii in range(matrix2.shape[0]): key = hash_array(matrix2[ii, :]) # allow duplicates of rows if key not in matrix2_dict: matrix2_dict[key] = 0 else: matrix2_dict[key] += 1 if len(list(matrix1_dict.keys())) != len(list(matrix2_dict.keys())): return False for key in list(matrix1_dict.keys()): if key not in matrix2_dict: return False if matrix2_dict[key] != matrix1_dict[key]: return False return True def get_2d_cartesian_grid(num_pts_1d, ranges): r""" Get a 2d tensor grid with equidistant points. Parameters ---------- num_pts_1d : integer The number of points in each dimension ranges : np.ndarray (4) The lower and upper bound of each dimension [lb_1,ub_1,lb_2,ub_2] Returns ------- grid : np.ndarray (2,num_pts_1d**2) The points in the tensor product grid. [x1,x2,...x1,x2...] [y1,y1,...y2,y2...] """ # from math_tools_cpp import cartesian_product_double as cartesian_product from PyDakota.math_tools import cartesian_product x1 = np.linspace(ranges[0], ranges[1], num_pts_1d) x2 = np.linspace(ranges[2], ranges[3], num_pts_1d) abscissa_1d = [] abscissa_1d.append(x1) abscissa_1d.append(x2) grid = cartesian_product(abscissa_1d, 1) return grid def invert_permutation_vector(p, dtype=int): r""" Returns the "inverse" of a permutation vector. I.e., returns the permutation vector that performs the inverse of the original permutation operation. Parameters ---------- p: np.ndarray Permutation vector dtype: type Data type passed to np.ndarray constructor Returns ------- pt: np.ndarray Permutation vector that accomplishes the inverse of the permutation p. 
""" N = np.max(p) + 1 pt = np.zeros(p.size, dtype=dtype) pt[p] = np.arange(N, dtype=dtype) return pt def nchoosek(nn, kk): try: # SciPy >= 0.19 from scipy.special import comb except: from scipy.misc import comb result = np.asarray(np.round(comb(nn, kk)), dtype=int) if np.isscalar(result): result = np.asscalar(result) return result def total_degree_space_dimension(dimension, degree): r""" Return the number of basis functions in a total degree polynomial space, i.e. the space of all polynomials with degree at most degree. Parameters ---------- num_vars : integer The number of variables of the polynomials degree : The degree of the total-degree space Returns ------- num_terms : integer The number of basis functions in the total degree space Notes ----- Note .. math:: {n \choose k} = frac{\Gamma(n+k+1)}{\Gamma(k+1)\Gamma{n-k+1}}, \qquad \Gamma(m)=(m-1)! So for dimension :math:`d` and degree :math:`p` number of terms in subspace is .. math:: {d+p \choose p} = frac{\Gamma(d+p+1)}{\Gamma(p+1)\Gamma{d+p-p+1}}, \qquad \Gamma(m)=(m-1)! """ # return nchoosek(dimension+degree, degree) # Following more robust for large values return int(np.round( np.exp(gammaln(degree+dimension+1) - gammaln(degree+1) - gammaln( dimension+1)))) def total_degree_subspace_dimension(dimension, degree): r""" Return the number of basis functions in a total degree polynomial space, with degree equal to degree. Parameters ---------- num_vars : integer The number of variables of the polynomials degree : The degree of the total-degree space Returns ------- num_terms : integer The number of basis functions in the total degree space of a given degree """ # subspace_dimension = nchoosek(nvars+degree-1, degree) # Following more robust for large values subspace_dimension = int( np.round(np.exp(gammaln(degree+dimension) - gammaln(degree+1) - gammaln(dimension)))) return subspace_dimension def total_degree_encompassing_N(dimension, N): r""" Returns the smallest integer k such that the dimension of the total degree-k space is greater than N. """ k = 0 while total_degree_subspace_dimension(dimension, k) < N: k += 1 return k def total_degree_barrier_indices(dimension, max_degree): r""" Returns linear indices that bound total degree spaces Parameters ---------- dimension: int Parametric dimension max_degree: int Maximum polynomial degree Returns ------- degree_barrier_indices: list List of degree barrier indices up to (including) max_degree. """ degree_barrier_indices = [0] for degree in range(1, max_degree+1): degree_barrier_indices.append( total_degree_subspace_dimension(dimension, degree)) return degree_barrier_indices def total_degree_orthogonal_transformation(coefficients, d): r""" Returns an orthogonal matrix transformation that "matches" the input coefficients. Parameters ---------- coefficients: np.ndarray Length-N vector of expansion coefficients d: int Parametric dimension Returns ------- Q: np.ndarray A size N x N orthogonal matrix transformation. The first column is a unit vector in the direction of coefficients. """ from scipy.linalg import qr N = coefficients.size degree_barrier_indices = [1] max_degree = 0 while degree_barrier_indices[-1] < N-1: max_degree += 1 degree_barrier_indices.append( total_degree_subspace_dimension(d, max_degree)) q = np.zeros([N, N]) # Assume degree = 0 is just constant q[0, 0] = 1. 
for degree in range(1, max_degree+1): i1 = degree_barrier_indices[degree-1] i2 = degree_barrier_indices[degree] M = i2-i1 q[i1:i2, i1:i2] = qr(coefficients[i1:i2].reshape([M, 1]))[0] return q def get_low_rank_matrix(num_rows, num_cols, rank): r""" Construct a matrix of size num_rows x num_cols with a given rank. Parameters ---------- num_rows : integer The number rows in the matrix num_cols : integer The number columns in the matrix rank : integer The rank of the matrix Returns ------- Amatrix : np.ndarray (num_rows,num_cols) The low-rank matrix generated """ assert rank <= min(num_rows, num_cols) # Generate a matrix with normally distributed entries N = max(num_rows, num_cols) Amatrix = np.random.normal(0, 1, (N, N)) # Make A symmetric positive definite Amatrix = np.dot(Amatrix.T, Amatrix) # Construct low rank approximation of A eigvals, eigvecs = np.linalg.eigh(Amatrix.copy()) # Set smallest eigenvalues to zero. Note eigenvals are in # ascending order eigvals[:(eigvals.shape[0]-rank)] = 0. # Construct rank r A matrix Amatrix = np.dot(eigvecs, np.dot(np.diag(eigvals), eigvecs.T)) # Resize matrix to have requested size Amatrix = Amatrix[:num_rows, :num_cols] return Amatrix def adjust_sign_svd(U, V, adjust_based_upon_U=True): r""" Ensure uniquness of svd by ensuring the first entry of each left singular singular vector be positive. Only works for np.linalg.svd if full_matrices=False Parameters ---------- U : (M x M) matrix left singular vectors of a singular value decomposition of a (M x N) matrix A. V : (N x N) matrix right singular vectors of a singular value decomposition of a (M x N) matrix A. adjust_based_upon_U : boolean (default=True) True - make the first entry of each column of U positive False - make the first entry of each row of V positive Returns ------- U : (M x M) matrix left singular vectors with first entry of the first singular vector always being positive. V : (M x M) matrix right singular vectors consistent with sign adjustment applied to U. """ if U.shape[1] != V.shape[0]: raise ValueError( 'U.shape[1] must equal V.shape[0]. If using np.linalg.svd set full_matrices=False') if adjust_based_upon_U: s = np.sign(U[0, :]) else: s = np.sign(V[:, 0]) U *= s V *= s[:, np.newaxis] return U, V def adjust_sign_eig(U): r""" Ensure uniquness of eigenvalue decompotision by ensuring the first entry of the first singular vector of U is positive. Parameters ---------- U : (M x M) matrix left singular vectors of a singular value decomposition of a (M x M) matrix A. Returns ------- U : (M x M) matrix left singular vectors with first entry of the first singular vector always being positive. """ s = np.sign(U[0, :]) U *= s return U def sorted_eigh(C): r""" Compute the eigenvalue decomposition of a matrix C and sort the eigenvalues and corresponding eigenvectors by decreasing magnitude. Warning. This will prioritize large eigenvalues even if they are negative. 
Do not use if need to distinguish between positive and negative eigenvalues Input B: matrix (NxN) matrix to decompose Output e: vector (N) absolute values of the eigenvalues of C sorted by decreasing magnitude W: eigenvectors sorted so that they respect sorting of e """ e, W = np.linalg.eigh(C) e = abs(e) ind = np.argsort(e) e = e[ind[::-1]] W = W[:, ind[::-1]] s = np.sign(W[0, :]) s[s == 0] = 1 W = W*s return e.reshape((e.size, 1)), W def continue_pivoted_lu_factorization(LU_factor, raw_pivots, current_iter, max_iters, num_initial_rows=0): it = current_iter for it in range(current_iter, max_iters): # find best pivot if np.isscalar(num_initial_rows) and (it < num_initial_rows): # pivot=np.argmax(np.absolute(LU_factor[it:num_initial_rows,it]))+it pivot = it elif (not np.isscalar(num_initial_rows) and (it < num_initial_rows.shape[0])): pivot = num_initial_rows[it] else: pivot = np.argmax(np.absolute(LU_factor[it:, it]))+it # update pivots vector # swap_rows(pivots,it,pivot) raw_pivots[it] = pivot # apply pivots(swap rows) in L factorization swap_rows(LU_factor, it, pivot) # check for singularity if abs(LU_factor[it, it]) < np.finfo(float).eps: msg = "pivot %1.2e" % abs(LU_factor[it, it]) msg += " is to small. Stopping factorization." print(msg) break # update L_factor LU_factor[it+1:, it] /= LU_factor[it, it] # udpate U_factor col_vector = LU_factor[it+1:, it] row_vector = LU_factor[it, it+1:] update = np.outer(col_vector, row_vector) LU_factor[it+1:, it+1:] -= update return LU_factor, raw_pivots, it def unprecondition_LU_factor(LU_factor, precond_weights, num_pivots=None): r""" A=LU and WA=XY Then WLU=XY We also know Y=WU So WLU=XWU => WL=XW so L=inv(W)*X*W and U = inv(W)Y """ if num_pivots is None: num_pivots = np.min(LU_factor.shape) assert precond_weights.shape[1] == 1 assert precond_weights.shape[0] == LU_factor.shape[0] # left multiply L an U by inv(W), i.e. compute inv(W).dot(L) # and inv(W).dot(U) # `np.array` creates a new copy of LU_factor, faster than `.copy()` LU_factor = np.array(LU_factor)/precond_weights # right multiply L by W, i.e. compute L.dot(W) # Do not overwrite columns past num_pivots. If not all pivots have been # performed the columns to the right of this point contain U factor for ii in range(num_pivots): LU_factor[ii+1:, ii] *= precond_weights[ii, 0] return LU_factor def split_lu_factorization_matrix(LU_factor, num_pivots=None): r""" Return the L and U factors of an inplace LU factorization Parameters ---------- num_pivots : integer The number of pivots performed. This allows LU in place matrix to be split during evolution of LU algorithm """ if num_pivots is None: num_pivots = np.min(LU_factor.shape) L_factor = np.tril(LU_factor) if L_factor.shape[1] < L_factor.shape[0]: # if matrix over-determined ensure L is a square matrix n0 = L_factor.shape[0]-L_factor.shape[1] L_factor = np.hstack([L_factor, np.zeros((L_factor.shape[0], n0))]) if num_pivots < np.min(L_factor.shape): n1 = L_factor.shape[0]-num_pivots n2 = L_factor.shape[1]-num_pivots L_factor[num_pivots:, num_pivots:] = np.eye(n1, n2) np.fill_diagonal(L_factor, 1.) U_factor = np.triu(LU_factor) U_factor[num_pivots:, num_pivots:] = LU_factor[num_pivots:, num_pivots:] return L_factor, U_factor def truncated_pivoted_lu_factorization(A, max_iters, num_initial_rows=0, truncate_L_factor=True): r""" Compute a incomplete pivoted LU decompostion of a matrix. Parameters ---------- A np.ndarray (num_rows,num_cols) The matrix to be factored max_iters : integer The maximum number of pivots to perform. 
Internally max)iters will be set such that max_iters = min(max_iters,K), K=min(num_rows,num_cols) num_initial_rows: integer or np.ndarray() The number of the top rows of A to be chosen as pivots before any remaining rows can be chosen. If object is an array then entries are raw pivots which will be used in order. Returns ------- L_factor : np.ndarray (max_iters,K) The lower triangular factor with a unit diagonal. K=min(num_rows,num_cols) U_factor : np.ndarray (K,num_cols) The upper triangular factor raw_pivots : np.ndarray (num_rows) The sequential pivots used to during algorithm to swap rows of A. pivots can be obtained from raw_pivots using get_final_pivots_from_sequential_pivots(raw_pivots) pivots : np.ndarray (max_iters) The index of the chosen rows in the original matrix A chosen as pivots """ num_rows, num_cols = A.shape min_num_rows_cols = min(num_rows, num_cols) max_iters = min(max_iters, min_num_rows_cols) if (A.shape[1] < max_iters): msg = "truncated_pivoted_lu_factorization: " msg += " A is inconsistent with max_iters. Try deceasing max_iters or " msg += " increasing the number of columns of A" raise Exception(msg) # Use L to store both L and U during factoriation then copy out U in post # processing # `np.array` creates a new copy of A (faster than `.copy()`) LU_factor = np.array(A) raw_pivots = np.arange(num_rows) LU_factor, raw_pivots, it = continue_pivoted_lu_factorization( LU_factor, raw_pivots, 0, max_iters, num_initial_rows) if not truncate_L_factor: return LU_factor, raw_pivots else: pivots = get_final_pivots_from_sequential_pivots( raw_pivots)[:it+1] L_factor, U_factor = split_lu_factorization_matrix(LU_factor, it+1) L_factor = L_factor[:it+1, :it+1] U_factor = U_factor[:it+1, :it+1] return L_factor, U_factor, pivots def add_columns_to_pivoted_lu_factorization(LU_factor, new_cols, raw_pivots): r""" Given factorization PA=LU add new columns to A in unpermuted order and update LU factorization Parameters ---------- raw_pivots : np.ndarray (num_pivots) The pivots applied at each iteration of pivoted LU factorization. If desired one can use get_final_pivots_from_sequential_pivots to compute final position of rows after all pivots have been applied. 
""" assert LU_factor.shape[0] == new_cols.shape[0] assert raw_pivots.shape[0] <= new_cols.shape[0] num_pivots = raw_pivots.shape[0] for it, pivot in enumerate(raw_pivots): # inlined swap_rows() for performance new_cols[[it, pivot]] = new_cols[[pivot, it]] # update LU_factor # recover state of col vector from permuted LU factor # Let (jj,kk) represent iteration and pivot pairs # then if lu factorization produced sequence of pairs # (0,4),(1,2),(2,4) then LU_factor[:,0] here will be col_vector # in LU algorithm with the second and third permutations # so undo these permutations in reverse order next_idx = it+1 # `col_vector` is a copy of the LU_factor subset col_vector = np.array(LU_factor[next_idx:, it]) for ii in range(num_pivots-it-1): # (it+1) necessary in two lines below because only dealing # with compressed col vector which starts at row it in LU_factor jj = raw_pivots[num_pivots-1-ii]-next_idx kk = num_pivots-ii-1-next_idx # inlined swap_rows() col_vector[jj], col_vector[kk] = col_vector[kk], col_vector[jj] new_cols[next_idx:, :] -= np.outer(col_vector, new_cols[it, :]) LU_factor = np.hstack((LU_factor, new_cols)) return LU_factor def add_rows_to_pivoted_lu_factorization(LU_factor, new_rows, num_pivots): assert LU_factor.shape[1] == new_rows.shape[1] LU_factor_extra = np.array(new_rows) # take copy of `new_rows` for it in range(num_pivots): LU_factor_extra[:, it] /= LU_factor[it, it] col_vector = LU_factor_extra[:, it] row_vector = LU_factor[it, it+1:] update = np.outer(col_vector, row_vector) LU_factor_extra[:, it+1:] -= update return np.vstack([LU_factor, LU_factor_extra]) def swap_rows(matrix, ii, jj): matrix[[ii, jj]] = matrix[[jj, ii]] def pivot_rows(pivots, matrix, in_place=True): if not in_place: matrix = matrix.copy() num_pivots = pivots.shape[0] assert num_pivots <= matrix.shape[0] for ii in range(num_pivots): swap_rows(matrix, ii, pivots[ii]) return matrix def get_final_pivots_from_sequential_pivots(sequential_pivots, num_pivots=None): if num_pivots is None: num_pivots = sequential_pivots.shape[0] assert num_pivots >= sequential_pivots.shape[0] pivots = np.arange(num_pivots) return pivot_rows(sequential_pivots, pivots, False) def get_tensor_product_quadrature_rule( degrees, num_vars, univariate_quadrature_rules, transform_samples=None, density_function=None): r""" if get error about outer product failing it may be because univariate_quadrature rule is returning a weights array for every level, i.e. 
l=0,...level """ degrees = np.atleast_1d(degrees) if degrees.shape[0] == 1 and num_vars > 1: degrees = np.array([degrees[0]]*num_vars, dtype=int) if callable(univariate_quadrature_rules): univariate_quadrature_rules = [univariate_quadrature_rules]*num_vars x_1d = [] w_1d = [] for ii in range(len(univariate_quadrature_rules)): x, w = univariate_quadrature_rules[ii](degrees[ii]) x_1d.append(x) w_1d.append(w) samples = cartesian_product(x_1d, 1) weights = outer_product(w_1d) if density_function is not None: weights *= density_function(samples) if transform_samples is not None: samples = transform_samples(samples) return samples, weights def piecewise_quadratic_interpolation(samples, mesh, mesh_vals, ranges): assert mesh.shape[0] == mesh_vals.shape[0] vals = np.zeros_like(samples) samples = (samples-ranges[0])/(ranges[1]-ranges[0]) for ii in range(0, mesh.shape[0]-2, 2): xl = mesh[ii] xr = mesh[ii+2] x = (samples-xl)/(xr-xl) interval_vals = canonical_piecewise_quadratic_interpolation( x, mesh_vals[ii:ii+3]) # to avoid double counting we set left boundary of each interval to # zero except for first interval if ii == 0: interval_vals[(x < 0) | (x > 1)] = 0. else: interval_vals[(x <= 0) | (x > 1)] = 0. vals += interval_vals return vals # I = np.argsort(samples) # sorted_samples = samples[I] # idx2=0 # for ii in range(0,mesh.shape[0]-2,2): # xl=mesh[ii]; xr=mesh[ii+2] # for jj in range(idx2,sorted_samples.shape[0]): # if ii==0: # if sorted_samples[jj]>=xl: # idx1=jj # break # else: # if sorted_samples[jj]>xl: # idx1=jj # break # for jj in range(idx1,sorted_samples.shape[0]): # if sorted_samples[jj]>xr: # idx2=jj-1 # break # if jj==sorted_samples.shape[0]-1: # idx2=jj # x=(sorted_samples[idx1:idx2+1]-xl)/(xr-xl) # interval_vals = canonical_piecewise_quadratic_interpolation( # x,mesh_vals[ii:ii+3]) # vals[idx1:idx2+1] += interval_vals # return vals[np.argsort(I)] def canonical_piecewise_quadratic_interpolation(x, nodal_vals): r""" Piecewise quadratic interpolation of nodes at [0,0.5,1] Assumes all values are in [0,1]. """ assert x.ndim == 1 assert nodal_vals.shape[0] == 3 vals = nodal_vals[0]*(1.0-3.0*x+2.0*x**2)+nodal_vals[1]*(4.0*x-4.0*x**2) +\ nodal_vals[2]*(-x+2.0*x**2) return vals def discrete_sampling(N, probs, states=None): r""" discrete_sampling -- samples iid from a discrete probability measure x = discrete_sampling(N, prob, states) Generates N iid samples from a random variable X whose probability mass function is prob(X = states[j]) = prob[j], 1 <= j <= length(prob). 
If states is not given, the states are gives by 1 <= state <= length(prob) """ p = probs.squeeze()/np.sum(probs) bins = np.digitize( np.random.uniform(0., 1., (N, 1)), np.hstack((0, np.cumsum(p))))-1 if states is None: x = bins else: assert(states.shape[0] == probs.shape[0]) x = states[bins] return x.squeeze() def lists_of_arrays_equal(list1, list2): if len(list1) != len(list2): return False for ll in range(len(list1)): if not np.allclose(list1[ll], list2[ll]): return False return True def lists_of_lists_of_arrays_equal(list1, list2): if len(list1) != len(list2): return False for ll in range(len(list1)): for kk in range(len(list1[ll])): if not np.allclose(list1[ll][kk], list2[ll][kk]): return False return True def beta_pdf(alpha_stat, beta_stat, x): # scipy implementation is slow const = 1./beta_fn(alpha_stat, beta_stat) return const*(x**(alpha_stat-1)*(1-x)**(beta_stat-1)) def pdf_under_affine_map(pdf, loc, scale, y): return pdf((y-loc)/scale)/scale def beta_pdf_on_ab(alpha_stat, beta_stat, a, b, x): # const = 1./beta_fn(alpha_stat,beta_stat) # const /= (b-a)**(alpha_stat+beta_stat-1) # return const*((x-a)**(alpha_stat-1)*(b-x)**(beta_stat-1)) from functools import partial pdf = partial(beta_pdf, alpha_stat, beta_stat) return pdf_under_affine_map(pdf, a, (b-a), x) def beta_pdf_derivative(alpha_stat, beta_stat, x): r""" x in [0,1] """ # beta_const = gamma_fn(alpha_stat+beta_stat)/( # gamma_fn(alpha_stat)*gamma_fn(beta_stat)) beta_const = 1./beta_fn(alpha_stat, beta_stat) deriv = 0 if alpha_stat > 1: deriv += (alpha_stat-1)*(x**(alpha_stat-2)*(1-x)**(beta_stat-1)) if beta_stat > 1: deriv -= (beta_stat - 1)*(x**(alpha_stat-1)*(1-x)**(beta_stat-2)) deriv *= beta_const return deriv def gaussian_cdf(mean, var, x): return 0.5*(1+erf((x-mean)/(np.sqrt(var*2)))) def gaussian_pdf(mean, var, x, package=np): r""" set package=sympy if want to use for symbolic calculations """ return package.exp(-(x-mean)**2/(2*var)) / (2*package.pi*var)**.5 def gaussian_pdf_derivative(mean, var, x): return -gaussian_pdf(mean, var, x)*(x-mean)/var def pdf_derivative_under_affine_map(pdf_deriv, loc, scale, y): r""" Let y=g(x)=x*scale+loc and x = g^{-1}(y) = v(y) = (y-loc)/scale, scale>0 p_Y(y)=p_X(v(y))*|dv/dy(y)|=p_X((y-loc)/scale))/scale dp_Y(y)/dy = dv/dy(y)*dp_X/dx(v(y))/scale = dp_X/dx(v(y))/scale**2 """ return pdf_deriv((y-loc)/scale)/scale**2 def gradient_of_tensor_product_function(univariate_functions, univariate_derivatives, samples): num_samples = samples.shape[1] num_vars = len(univariate_functions) assert len(univariate_derivatives) == num_vars gradient = np.empty((num_vars, num_samples)) # precompute data which is reused multiple times function_values = [] for ii in range(num_vars): function_values.append(univariate_functions[ii](samples[ii, :])) for ii in range(num_vars): gradient[ii, :] = univariate_derivatives[ii](samples[ii, :]) for jj in range(ii): gradient[ii, :] *= function_values[jj] for jj in range(ii+1, num_vars): gradient[ii, :] *= function_values[jj] return gradient def evaluate_tensor_product_function(univariate_functions, samples): num_samples = samples.shape[1] num_vars = len(univariate_functions) values = np.ones((num_samples)) for ii in range(num_vars): values *= univariate_functions[ii](samples[ii, :]) return values def cholesky_decomposition(Amat): nrows = Amat.shape[0] assert Amat.shape[1] == nrows L = np.zeros((nrows, nrows)) for ii in range(nrows): temp = Amat[ii, ii]-np.sum(L[ii, :ii]**2) if temp <= 0: raise Exception('matrix is not positive definite') L[ii, ii] = np.sqrt(temp) 
L[ii+1:, ii] =\ (Amat[ii+1:, ii]-np.sum( L[ii+1:, :ii]*L[ii, :ii], axis=1))/L[ii, ii] return L def pivoted_cholesky_decomposition(A, npivots, init_pivots=None, tol=0., error_on_small_tol=False, pivot_weights=None, return_full=False, econ=True): r""" Return a low-rank pivoted Cholesky decomposition of matrix A. If A is positive definite and npivots is equal to the number of rows of A then L.dot(L.T)==A To obtain the pivoted form of L set L = L[pivots,:] Then P.T.dot(A).P == L.dot(L.T) where P is the standard pivot matrix which can be obtained from the pivot vector using the function """ Amat = A.copy() nrows = Amat.shape[0] assert Amat.shape[1] == nrows assert npivots <= nrows # L = np.zeros(((nrows,npivots))) L = np.zeros(((nrows, nrows))) # diag1 = np.diag(Amat).copy() # returns a copy of diag diag = Amat.ravel()[::Amat.shape[0]+1] # returns a view of diag # assert np.allclose(diag,diag1) pivots = np.arange(nrows) init_error = np.absolute(diag).sum() L, pivots, diag, chol_flag, ncompleted_pivots, error = \ continue_pivoted_cholesky_decomposition( Amat, L, npivots, init_pivots, tol, error_on_small_tol, pivot_weights, pivots, diag, 0, init_error, econ) if not return_full: return L[:, :ncompleted_pivots], pivots[:ncompleted_pivots], error,\ chol_flag else: return L, pivots, error, chol_flag, diag.copy(), init_error, \ ncompleted_pivots def continue_pivoted_cholesky_decomposition(Amat, L, npivots, init_pivots, tol, error_on_small_tol, pivot_weights, pivots, diag, ncompleted_pivots, init_error, econ): Amat = Amat.copy() # Do not overwrite incoming Amat if econ is False and pivot_weights is not None: msg = 'pivot weights not used when econ is False' raise Exception(msg) chol_flag = 0 assert ncompleted_pivots < npivots for ii in range(ncompleted_pivots, npivots): if init_pivots is None or ii >= len(init_pivots): if econ: if pivot_weights is None: pivot = np.argmax(diag[pivots[ii:]])+ii else: pivot = np.argmax( pivot_weights[pivots[ii:]]*diag[pivots[ii:]])+ii else: schur_complement = ( Amat[np.ix_(pivots[ii:], pivots[ii:])] - L[pivots[ii:], :ii].dot(L[pivots[ii:], :ii].T)) schur_diag = np.diagonal(schur_complement) pivot = np.argmax( np.linalg.norm(schur_complement, axis=0)**2/schur_diag) pivot += ii else: pivot = np.where(pivots == init_pivots[ii])[0][0] assert pivot >= ii swap_rows(pivots, ii, pivot) if diag[pivots[ii]] <= 0: msg = 'matrix is not positive definite' if error_on_small_tol: raise Exception(msg) else: print(msg) chol_flag = 1 break L[pivots[ii], ii] = np.sqrt(diag[pivots[ii]]) L[pivots[ii+1:], ii] = ( Amat[pivots[ii+1:], pivots[ii]] - L[pivots[ii+1:], :ii].dot(L[pivots[ii], :ii]))/L[pivots[ii], ii] diag[pivots[ii+1:]] -= L[pivots[ii+1:], ii]**2 # for jj in range(ii+1,nrows): # L[pivots[jj],ii]=(Amat[pivots[ii],pivots[jj]]- # L[pivots[ii],:ii].dot(L[pivots[jj],:ii]))/L[pivots[ii],ii] # diag[pivots[jj]] -= L[pivots[jj],ii]**2 error = diag[pivots[ii+1:]].sum()/init_error # print(ii,'error',error) if error < tol: msg = 'Tolerance reached. ' msg += f'Iteration:{ii}. Tol={tol}. 
Error={error}' # If matrix is rank r then then error will be machine precision # In such a case exiting without an error is the right thing to do if error_on_small_tol: raise Exception(msg) else: chol_flag = 1 print(msg) break return L, pivots, diag, chol_flag, ii+1, error def get_pivot_matrix_from_vector(pivots, nrows): P = np.eye(nrows) P = P[pivots, :] return P def determinant_triangular_matrix(matrix): return np.prod(np.diag(matrix)) def get_all_primes_less_than_or_equal_to_n(n): primes = list() primes.append(2) for num in range(3, n+1, 2): if all(num % i != 0 for i in range(2, int(num**.5) + 1)): primes.append(num) return np.asarray(primes) @njit(cache=True) def get_first_n_primes(n): primes = list() primes.append(2) num = 3 while len(primes) < n: # np.all does not work with numba # if np.all([num % i != 0 for i in range(2, int(num**.5) + 1)]): flag = True for i in range(2, int(num**.5) + 1): if (num % i == 0): flag = False break if flag is True: primes.append(num) num += 2 return np.asarray(primes) def approx_fprime(x, func, eps=np.sqrt(np.finfo(float).eps)): r"""Approx the gradient of a vector valued function at a single sample using finite_difference """ assert x.shape[1] == 1 nvars = x.shape[0] fprime = [] func_at_x = func(x).squeeze() assert func_at_x.ndim == 1 for ii in range(nvars): x_plus_eps = x.copy() x_plus_eps[ii] += eps fprime.append((func(x_plus_eps).squeeze()-func_at_x)/eps) return np.array(fprime) def partial_functions_equal(func1, func2): if not (isinstance(func1, partial) and isinstance(func2, partial)): return False are_equal = all([getattr(func1, attr) == getattr(func2, attr) for attr in ['func', 'args', 'keywords']]) return are_equal def get_all_sample_combinations(samples1, samples2): r""" For two sample sets of different random variables loop over all combinations samples1 vary slowest and samples2 vary fastest Let samples1 = [[1,2],[2,3]] samples2 = [[0, 0, 0],[0, 1, 2]] Then samples will be ([1, 2, 0, 0, 0]) ([1, 2, 0, 1, 2]) ([3, 4, 0, 0, 0]) ([3, 4, 0, 1, 2]) """ import itertools samples = [] for r in itertools.product(*[samples1.T, samples2.T]): samples.append(np.concatenate(r)) return np.asarray(samples).T def get_correlation_from_covariance(cov): r""" Compute the correlation matrix from a covariance matrix Parameters ---------- cov : np.ndarray (nrows,nrows) The symetric covariance matrix Returns ------- cor : np.ndarray (nrows,nrows) The symetric correlation matrix Examples -------- >>> cov = np.asarray([[2,-1],[-1,2]]) >>> get_correlation_from_covariance(cov) array([[ 1. , -0.5], [-0.5, 1. ]]) """ stdev_inv = 1/np.sqrt(np.diag(cov)) cor = stdev_inv[np.newaxis, :]*cov*stdev_inv[:, np.newaxis] return cor def compute_f_divergence(density1, density2, quad_rule, div_type, normalize=False): r""" Compute f divergence between two densities .. math:: \int_\Gamma f\left(\frac{p(z)}{q(z)}\right)q(x)\,dx Parameters ---------- density1 : callable The density p(z) density2 : callable The density q(z) normalize : boolean True - normalize the densities False - Check that densities are normalized, i.e. integrate to 1 quad_rule : tuple x,w - quadrature points and weights x : np.ndarray (num_vars,num_samples) w : np.ndarray (num_samples) div_type : string The type of f divergence (KL,TV,hellinger). 
KL - Kullback-Leibler :math:`f(t)=t\log t` TV - total variation :math:`f(t)=\frac{1}{2}\lvert t-1\rvert` hellinger - squared Hellinger :math:`f(t)=(\sqrt(t)-1)^2` """ x, w = quad_rule assert w.ndim == 1 density1_vals = density1(x).squeeze() const1 = density1_vals.dot(w) density2_vals = density2(x).squeeze() const2 = density2_vals.dot(w) if normalize: density1_vals /= const1 density2_vals /= const2 else: tol = 1e-14 # print(const1) # print(const2) assert np.allclose(const1, 1.0, atol=tol) assert np.allclose(const2, 1.0, atol=tol) const1, const2 = 1.0, 1.0 # normalize densities. May be needed if density is # Unnormalized Bayesian Posterior def d1(x): return density1(x)/const1 def d2(x): return density2(x)/const2 if div_type == 'KL': # Kullback-Leibler def f(t): return t*np.log(t) elif div_type == 'TV': # Total variation def f(t): return 0.5*np.absolute(t-1) elif div_type == 'hellinger': # Squared hellinger int (p(z)**0.5-q(z)**0.5)**2 dz # Note some formulations use 0.5 times above integral. We do not # do that here def f(t): return (np.sqrt(t)-1)**2 else: raise Exception(f'Divergence type {div_type} not supported') d1_vals, d2_vals = d1(x), d2(x) II = np.where(d2_vals > 1e-15)[0] ratios = np.zeros_like(d2_vals)+1e-15 ratios[II] = d1_vals[II]/d2_vals[II] if not np.all(np.isfinite(ratios)): print(d1_vals[II], d2_vals[II]) msg = 'Densities are not absolutely continuous. ' msg += 'Ensure that density2(z)=0 implies density1(z)=0' raise Exception(msg) divergence_integrand = f(ratios)*d2_vals return divergence_integrand.dot(w) def cholesky_solve_linear_system(L, rhs): r""" Solve LL'x = b using forwards and backwards substitution """ # Use forward subsitution to solve Ly = b y = solve_triangular(L, rhs, lower=True) # Use backwards subsitution to solve L'x = y x = solve_triangular(L.T, y, lower=False) return x def update_cholesky_factorization(L_11, A_12, A_22): r""" Update a Cholesky factorization. Specifically compute the Cholesky factorization of .. math:: A=\begin{bmatrix} A_{11} & A_{12}\\ A_{12}^T & A_{22}\end{bmatrix} where :math:`L_{11}` is the Cholesky factorization of :math:`A_{11}`. Noting that .. math:: \begin{bmatrix} A_{11} & A_{12}\\ A_{12}^T & A_{22}\end{bmatrix} = \begin{bmatrix} L_{11} & 0\\ L_{12}^T & L_{22}\end{bmatrix} \begin{bmatrix} L_{11}^T & L_{12}\\ 0 & L_{22}^T\end{bmatrix} we can equate terms to find .. math:: L_{12} = L_{11}^{-1}A_{12}, \quad L_{22}L_{22}^T = A_{22}-L_{12}^TL_{12} """ if L_11.shape[0] == 0: return np.linalg.cholesky(A_22) nrows, ncols = A_12.shape assert A_22.shape == (ncols, ncols) assert L_11.shape == (nrows, nrows) L_12 = solve_triangular(L_11, A_12, lower=True) print(A_22 - L_12.T.dot(L_12)) L_22 = np.linalg.cholesky(A_22 - L_12.T.dot(L_12)) L = np.block([[L_11, np.zeros((nrows, ncols))], [L_12.T, L_22]]) return L def update_cholesky_factorization_inverse(L_11_inv, L_12, L_22): nrows, ncols = L_12.shape L_22_inv = np.linalg.inv(L_22) L_inv = np.block( [[L_11_inv, np.zeros((nrows, ncols))], [-L_22_inv.dot(L_12.T.dot(L_11_inv)), L_22_inv]]) return L_inv def update_trace_involving_cholesky_inverse(L_11_inv, L_12, L_22_inv, B, prev_trace): r""" Update the trace of matrix matrix product involving the inverse of a matrix with a cholesky factorization. That is compute .. 
math:: \mathrm{Trace}\leftA^{inv}B\right} where :math:`A=LL^T` """ nrows, ncols = L_12.shape assert B.shape == (nrows+ncols, nrows+ncols) B_11 = B[:nrows, :nrows] B_12 = B[:nrows, nrows:] B_21 = B[nrows:, :nrows] B_22 = B[nrows:, nrows:] # assert np.allclose(B, np.block([[B_11, B_12],[B_21, B_22]])) C = -np.dot(L_22_inv.dot(L_12.T), L_11_inv) C_T_L_22_inv = C.T.dot(L_22_inv) trace = prev_trace + np.sum(C.T.dot(C)*B_11) + \ np.sum(C_T_L_22_inv*B_12) + np.sum(C_T_L_22_inv.T*B_21) + \ np.sum(L_22_inv.T.dot(L_22_inv)*B_22) return trace def num_entries_square_triangular_matrix(N, include_diagonal=True): r"""Num entries in upper (or lower) NxN traingular matrix""" if include_diagonal: return int(N*(N+1)/2) else: return int(N*(N-1)/2) def num_entries_rectangular_triangular_matrix(M, N, upper=True): r"""Num entries in upper (or lower) MxN traingular matrix. This is useful for nested for loops like (upper=True) for ii in range(M): for jj in range(ii+1): (upper=False) for jj in range(N): for ii in range(jj+1): """ assert M >= N if upper: return num_entries_square_triangular_matrix(N) else: return num_entries_square_triangular_matrix(M) -\ num_entries_square_triangular_matrix(M-N) def flattened_rectangular_lower_triangular_matrix_index(ii, jj, M, N): r""" Get flattened index kk from row and column indices (ii,jj) of a lower triangular part of MxN matrix """ assert M >= N assert ii >= jj if ii == 0: return 0 T = num_entries_rectangular_triangular_matrix(ii, min(ii, N), upper=False) kk = T+jj return kk def evaluate_quadratic_form(matrix, samples): r""" Evaluate x.T.dot(A).dot(x) for several vectors x Parameters ---------- num_samples : np.ndarray (nvars,nsamples) The vectors x matrix : np.ndarray(nvars,nvars) The matrix A Returns ------- vals : np.ndarray (nsamples) Evaluations of the quadratic form for each vector x """ return (samples.T.dot(matrix)*samples.T).sum(axis=1) def split_dataset(samples, values, ndata1): """ Split a data set into two sets. Parameters ---------- samples : np.ndarray (nvars,nsamples) The samples to be split values : np.ndarray (nsamples,nqoi) Values of the data at ``samples`` ndata1 : integer The number of samples allocated to the first split. All remaining samples will be added to the second split. Returns ------- samples1 : np.ndarray (nvars,ndata1) The samples of the first split data set values1 : np.ndarray (nvars,ndata1) The values of the first split data set samples2 : np.ndarray (nvars,ndata1) The samples of the first split data set values2 : np.ndarray (nvars,ndata1) The values of the first split data set """ assert ndata1 <= samples.shape[1] assert values.shape[0] == samples.shape[1] II = np.random.permutation(samples.shape[1]) samples1 = samples[:, II[:ndata1]] samples2 = samples[:, II[ndata1:]] values1 = values[II[:ndata1], :] values2 = values[II[ndata1:], :] return samples1, samples2, values1, values2 def leave_one_out_lsq_cross_validation(basis_mat, values, alpha=0, coef=None): """ let :math:`x_i` be the ith row of :math:`X` and let :math:`\beta=(X^\top X)^{-1}X^\top y` such that the residuals at the training samples satisfy .. math:: r_i = X\beta-y then the leave one out cross validation errors are given by .. 
math:: e_i = \frac{r_i}{1-h_i} where :math:`h_i = x_i^\top(X^\top X)^{-1}x_i` """ assert values.ndim == 2 assert basis_mat.shape[0] > basis_mat.shape[1]+2 gram_mat = basis_mat.T.dot(basis_mat) gram_mat += alpha*np.eye(gram_mat.shape[0]) H_mat = basis_mat.dot(np.linalg.inv(gram_mat).dot(basis_mat.T)) H_diag = np.diag(H_mat) if coef is None: coef = np.linalg.lstsq( gram_mat, basis_mat.T.dot(values), rcond=None)[0] assert coef.ndim == 2 residuals = basis_mat.dot(coef) - values cv_errors = residuals / (1-H_diag[:, None]) cv_score = np.sqrt(np.sum(cv_errors**2, axis=0)/basis_mat.shape[0]) return cv_errors, cv_score, coef def leave_many_out_lsq_cross_validation(basis_mat, values, fold_sample_indices, alpha=0, coef=None): nfolds = len(fold_sample_indices) nsamples = basis_mat.shape[0] cv_errors = [] cv_score = 0 gram_mat = basis_mat.T.dot(basis_mat) gram_mat += alpha*np.eye(gram_mat.shape[0]) if coef is None: coef = np.linalg.lstsq( gram_mat, basis_mat.T.dot(values), rcond=None)[0] residuals = basis_mat.dot(coef) - values gram_mat_inv =
np.linalg.inv(gram_mat)
numpy.linalg.inv
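# A minimal runnable sketch of the completion above: np.linalg.inv on the (optionally
# ridge-regularised) Gram matrix yields the hat-matrix diagonal behind the leave-one-out
# errors e_i = r_i / (1 - h_i). Shapes, seed, and the ridge value are illustrative assumptions.
import numpy as np
np.random.seed(0)
basis_mat = np.random.randn(20, 3)                 # nsamples > nterms, as the assert requires
alpha = 1e-8                                       # tiny ridge term keeps the inverse stable
gram_mat = basis_mat.T.dot(basis_mat) + alpha * np.eye(3)
gram_mat_inv = np.linalg.inv(gram_mat)
H_diag = np.sum(basis_mat.dot(gram_mat_inv) * basis_mat, axis=1)  # h_i = x_i^T (X^T X)^{-1} x_i
assert np.all((H_diag > 0) & (H_diag < 1))         # hat-matrix diagonal lies in (0, 1)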
# -*- coding: utf-8 -*- from __future__ import division from collections import namedtuple import numpy as np import torch import time Transition = namedtuple('Transition', ('timestep', 'state', 'action', 'reward', 'nonterminal')) # blank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False) # Segment tree data structure where parent node values are sum/max of children node values class SegmentTree(): def __init__(self, size): self.index = 0 self.size = size self.full = False # Used to track actual capacity self.sum_tree = np.zeros((2 * size - 1, ), dtype=np.float32) # Initialise fixed size tree with all (priority) zeros self.data = np.array([None] * size) # Wrap-around cyclic buffer self.max = 1 # Initial max value to return (1 = 1^ω) # Propagates value up tree given a tree index def _propagate(self, index, value): parent = (index - 1) // 2 left, right = 2 * parent + 1, 2 * parent + 2 self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right] if parent != 0: self._propagate(parent, value) # Updates value given a tree index def update(self, index, value): self.sum_tree[index] = value # Set new value self._propagate(index, value) # Propagate value self.max = max(value, self.max) def append(self, data, value): self.data[self.index] = data # Store data in underlying data structure self.update(self.index + self.size - 1, value) # Update tree self.index = (self.index + 1) % self.size # Update index self.full = self.full or self.index == 0 # Save when capacity reached self.max = max(value, self.max) # Searches for the location of a value in sum tree def _retrieve(self, index, value): left, right = 2 * index + 1, 2 * index + 2 if left >= len(self.sum_tree): return index elif value <= self.sum_tree[left]: return self._retrieve(left, value) else: return self._retrieve(right, value - self.sum_tree[left]) def find_parallel(self, value): if isinstance(value, float): value =
np.array([value])
numpy.array
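# A minimal sketch of the completion above: find_parallel wraps a scalar priority in a
# 1-element array so scalar and batched sum-tree lookups share one vectorised code path.
import numpy as np
def to_batch(value):
    if isinstance(value, float):
        value = np.array([value])      # promote the scalar; arrays pass through unchanged
    return value
assert to_batch(0.5).shape == (1,)
assert to_batch(np.array([0.1, 0.9])).shape == (2,)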
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for bincount ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import bincount_ops from tensorflow.python.ops import gen_count_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import test class TestSparseCount(test.TestCase, parameterized.TestCase): @parameterized.named_parameters( { "testcase_name": "_no_maxlength", "x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32), "expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]], "expected_values": [1, 1, 1, 2, 1], "expected_shape": [2, 6] }, { "testcase_name": "_maxlength", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "maxlength": 7, "expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]], "expected_values": [1, 1, 1, 1, 2], "expected_shape": [2, 7] }, { "testcase_name": "_minlength", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "minlength": 9, "expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4], [1, 7]], "expected_values": [1, 1, 1, 1, 1, 2, 1], "expected_shape": [2, 9] }, { "testcase_name": "_minlength_larger_values", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "minlength": 3, "expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4], [1, 7]], "expected_values": [1, 1, 1, 1, 1, 2, 1], "expected_shape": [2, 8] }, { "testcase_name": "_no_maxlength_binary", "x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32), "expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]], "expected_values": [1, 1, 1, 1, 1], "expected_shape": [2, 6], "binary_output": True, }, { "testcase_name": "_maxlength_binary", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "maxlength": 7, "expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]], "expected_values": [1, 1, 1, 1, 1], "expected_shape": [2, 7], "binary_output": True, }, { "testcase_name": "_minlength_binary", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "minlength": 9, "expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4], [1, 7]], "expected_values": [1, 1, 1, 1, 1, 1, 1], "expected_shape": [2, 9], "binary_output": True, }, { "testcase_name": "_minlength_larger_values_binary", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "minlength": 3, "expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4], [1, 7]], "expected_values": [1, 1, 1, 1, 1, 1,
1], "expected_shape": [2, 8], "binary_output": True, }, { "testcase_name": "_no_maxlength_weights", "x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32), "expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]], "expected_values": [2, 1, 0.5, 9, 3], "expected_shape": [2, 6], "weights": [[0.5, 1, 2], [3, 4, 5]] }, { "testcase_name": "_maxlength_weights", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "maxlength": 7, "expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]], "expected_values": [2, 1, 0.5, 3, 9], "expected_shape": [2, 7], "weights": [[0.5, 1, 2, 11], [7, 3, 4, 5]] }, { "testcase_name": "_minlength_weights", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "minlength": 9, "expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4], [1, 7]], "expected_values": [2, 1, 0.5, 3, 5, 13, 4], "expected_shape": [2, 9], "weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]] }, { "testcase_name": "_minlength_larger_values_weights", "x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32), "minlength": 3, "expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4], [1, 7]], "expected_values": [2, 1, 0.5, 3, 5, 13, 4], "expected_shape": [2, 8], "weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]] }, { "testcase_name": "_1d", "x": np.array([3, 2, 1, 1], dtype=np.int32), "expected_indices": [[1], [2], [3]], "expected_values": [2, 1, 1], "expected_shape": [4] }, { "testcase_name": "_all_axes", "x":
np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
numpy.array
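# A sketch of why the "_no_maxlength" case expects those indices and values, using plain
# NumPy rather than the TensorFlow ops under test: a per-row dense bincount of the int32
# input has exactly the listed nonzero entries.
import numpy as np
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
counts = np.stack([np.bincount(row, minlength=6) for row in x])
assert np.array_equal(np.argwhere(counts), [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]])
assert np.array_equal(counts[counts > 0], [1, 1, 1, 2, 1])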
import gym import sys import numpy as np import os.path as osp import multiprocessing as mp import sys import torch from IPython import embed class BasicTask: def __init__(self, max_steps=sys.maxsize): self.steps = 0 self.max_steps = max_steps def reset(self, *args): self.steps = 0 state = self.env.reset(*args) return state def normalize_state(self, state): return state def step(self, action): next_state, reward, done, info = self.env.step(action) self.steps += 1 done = (done or self.steps >= self.max_steps) return next_state, reward, done, info def random_action(self): return self.env.action_space.sample() class GridWorldTask(BasicTask): def __init__( self, layouts=['map{}'.format(i) for i in range(11, 31)], num_obj_types=5, task_length=2, history_length=4, max_steps=300, train_combos=None, test_combos=None, gaussian_img=True, record=False, ): from synpo.gridworld.env import GridWorld, read_map, ComboEnv, PORGBEnv self.train_combos = train_combos self.test_combos = test_combos self.num_combos = len(train_combos) + len(test_combos) self.env = PORGBEnv(ComboEnv(GridWorld( layouts, window=history_length, task_length=task_length, num_obj_types=num_obj_types, train_combos=train_combos, test_combos=test_combos, gaussian_img=gaussian_img)), record=record) self.action_dim = self.env.action_space.n self.max_steps = max_steps self.name = 'gridworld' def save_config(self): return self.__dict__ def reset(self, index=None, sample_pos=True, train=True): self.steps = 0 state = self.env.reset(index, sample_pos=sample_pos, train=train) return state[0] def step(self, action): next_state, reward, done, info = self.env.step(action) self.steps += 1 done = (done or self.steps >= self.max_steps) return next_state[0], reward, done, info def normalize_state(self, state): return np.asarray([
np.asarray(s)
numpy.asarray
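# A minimal sketch of the completion above: the inner np.asarray converts each frame of
# the observation window, and the outer call stacks them. The /255 scaling is an assumed
# stand-in for whatever normalisation follows in the omitted code.
import numpy as np
state = [[[0, 255], [128, 64]], [[10, 20], [30, 40]]]   # stand-in for two small frames
batch = np.asarray([np.asarray(s) / 255.0 for s in state])
assert batch.shape == (2, 2, 2) and batch.dtype == np.float64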
import unittest import pysal import numpy as np from scipy import sparse from pysal.spreg import error_sp_het as HET from pysal.common import RTOL class TestBaseGMErrorHet(unittest.TestCase): def setUp(self): db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r") y = np.array(db.by_col("HOVAL")) self.y = np.reshape(y, (49,1)) X = [] X.append(db.by_col("INC")) X.append(db.by_col("CRIME")) self.X = np.array(X).T self.X = np.hstack((np.ones(self.y.shape),self.X)) self.X = sparse.csr_matrix(self.X) self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp")) self.w.transform = 'r' def test_model(self): reg = HET.BaseGM_Error_Het(self.y, self.X, self.w.sparse, step1c=True) betas = np.array([[ 47.99626638], [ 0.71048989], [ -0.55876126], [ 0.41178776]]) np.testing.assert_allclose(reg.betas,betas,RTOL) u = np.array([ 27.38122697]) np.testing.assert_allclose(reg.u[0],u,RTOL) ef = np.array([ 32.29765975]) np.testing.assert_allclose(reg.e_filtered[0],ef,RTOL) predy = np.array([ 53.08577603]) np.testing.assert_allclose(reg.predy[0],predy,RTOL) n = 49 np.testing.assert_allclose(reg.n,n) k = 3 np.testing.assert_allclose(reg.k,k) y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,RTOL)
numpy.testing.assert_allclose
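# A minimal sketch of the completion above: np.testing.assert_allclose takes rtol as its
# third positional argument, so the test passes while computed and reference values agree
# to the relative tolerance. RTOL = 1e-5 is an assumed stand-in for pysal.common.RTOL.
import numpy as np
RTOL = 1e-5
y = np.array([80.467003])
reg_y0 = np.array([80.46700301])                   # stand-in for reg.y[0]
np.testing.assert_allclose(reg_y0, y, RTOL)        # raises AssertionError on mismatch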
# A python implementation of Ailey's matlab tensor code. import os import numpy as np import math import SimpleITK as sitk from scipy import ndimage import nibabel as nib from PIL import Image import scipy.misc from scipy import signal import warnings warnings.filterwarnings("ignore") def doggen(sigma): """ Helper function to generate derivatives of Gaussian kernels, in either 1D, 2D, or 3D. Source code in MATLAB obtained from <NAME>, Stanford University, September 2015 :param sigma: Sigma for use (see defaults in generate_FSL_structure_tensor) :return: Derivative of Gaussian kernel with dimensions of sigma. """ halfsize = np.ceil(3 * np.max(sigma)) x = range(np.single(-halfsize), np.single(halfsize + 1)); # Python colon is not inclusive at end, while MATLAB is. dim = len(sigma); if dim == 1: X = np.array(x); # Remember that, by default, numpy arrays are elementwise multiplicative X = X.astype(float); k = -X * np.exp(-X ** 2 / (2 * sigma ** 2)); elif dim == 2: [X, Y] = np.meshgrid(x, x); X = X.astype(float); Y = Y.astype(float); k = -X * np.exp(-X ** 2 / (2 * sigma[0] ^ 2) * np.exp(-Y ** 2)) elif dim == 3: [X, Y, Z] = np.meshgrid(x, x, x); X = X.transpose(0, 2, 1); # Obtained through vigorous testing (see below...) Y = Y.transpose(2, 0, 1); Z = Z.transpose(2, 1, 0); X = X.astype(float); Y = Y.astype(float); Z = Z.astype(float); k = -X * np.exp(np.divide(-np.power(X, 2), 2 * np.power(sigma[0], 2))) * np.exp( np.divide(-np.power(Y, 2), 2 * np.power(sigma[1], 2))) * np.exp( np.divide(-np.power(Z, 2), 2 * np.power(sigma[2], 2))) else: print 'Only supports up to 3 dimensions' return np.divide(k, np.sum(np.abs(k[:]))); def gaussgen(sigma): """ Function to generate Gaussian kernels, in 1D, 2D and 3D. Source code in MATLAB obtained from <NAME>, Stanford University, September 2015 :param sigma: Sigma for use in generating Gaussian kernel (see defaults in generate_FSL_structure_tensor) :return: Gaussian kernel with dimensions of sigma. """ halfsize = np.ceil(3 * max(sigma)); x = range(np.single(-halfsize), np.single(halfsize + 1)); dim = len(sigma); if dim == 1: x = x.astype(float); k = np.exp(-x ** 2 / (2 * sigma ^ 2)); elif dim == 2: [X, Y] = np.meshgrid(x, x); X = X.astype(float); Y = Y.astype(float); k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2)); elif dim == 3: [X, Y, Z] = np.meshgrid(x, x, x); X = X.transpose(0, 2, 1); # Obtained through vigorous testing (see below...) Y = Y.transpose(2, 0, 1); Z = Z.transpose(2, 1, 0); X = X.astype(float); Y = Y.astype(float); Z = Z.astype(float); k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2)) * np.exp( -Z ** 2 / (2 * sigma[2] ** 2)); else: print 'Only supports up to dimension 3' return np.divide(k, np.sum(np.abs(k))); def tiff_to_array(folder_path, input_path): """ Function takes a single image (TIFF, or other also works), and returns the single image as a numpy array. Called by tiff_stack_to_array. :param input_path: Single image file to open. :return: Numpy representation of image. """ im = Image.open(folder_path + input_path) # im.show() imarray = np.array(im) # print(imarray) return imarray def tiff_stack_to_array(input_path): """ Function takes input_path, which should should lead to a directory. Loads all TIFFs in input_path, then generates numpy arrays from the TIFF stack by calling tiff_to_array helper function. Make sure TIFF images are ordered in numerical order. :param input_path: Folder or directory containing .tiff stack. :return: Numpy array of tiff stack. 
""" im_list = []; for filename in os.listdir(input_path): if filename.endswith(".tiff"): # print(os.path.join(directory, filename)) im_arr = tiff_to_array(input_path, filename) im_list.append(im_arr) s = np.stack(im_list, axis=2) print s.shape return s def nii_to_tiff_stack(input_path, token): """ Function loads an nii using SITK, then converts the nii into a folder containing a TIFF stack. This function is useful later on for generating the structure tensor. :param input_path: Path to .nii file. :param token: Name of token. """ image = sitk.ReadImage(input_path); planes_number = image.GetSize(); data = sitk.GetArrayFromImage(image) z_dimension = planes_number[2]; ## if we have (i, j, k), we want (k, j, i) (converts nibabel format to sitk format) ##new_im = aut_1367.swapaxes(0,2) # just swap i and k if not os.path.exists(token + "_TIFFs"): os.makedirs(token + "_TIFFs"); plane = 0; for plane in range(0, z_dimension): output = data[plane, :, :] scipy.misc.toimage(output).save(token + "_TIFFs/" + token + "_" + str(plane) + '.tiff') def generate_FSL_structure_tensor(img_data, filename, dogsigmaArr=[1], gausigmaArr=[2.3], angleArr=[25]): """ Function takes a numpy array (from TIFF_stack_to_array) and saves output FSL structure tensor as filename string. Allows inputting alternate dogsigmaArr, gausigmaArr, angleArr, although defaults to currently to parameters from MATLAB script. Also returns tensorfsl (the tensor fsl structure) image numpy array. ## Parameters (the script loops through all parameters and saves each result automatically) # dogsigmaArr = [1]; Sigma values for derivative of gaussian filter, recommended value: 0.6 - 1.3 (based on actual data) # gausigmaArr = [2.3]; Sigma values for gaussian filter, recommended value: 1.3 - 2.3 (based on actual data) # angleArr = [25]; Angle thresholds for fiber tracking, recommended value: 20 - 30. Follows code from MATLAB CAPTURE scripts. :param img_data: Numpy array of image, typically from tiff_stack_to_array called on a directory of TIFFs. :param filename: Name to save the FSL structure tensor as. :param dogsigmaArr: Sigma values for derivative of Gaussian filter, with recommended values between 0.6 - 1.3. :param gausigmaArr: Sigma values for Gaussian filter, with recommended values between 1.3 - 2.3. :param angleArr: Angle threshold for fiber tracking, with recommended values between 20 - 30. :return tensorfsl: TensorFSL format of structure tensor (upper triangular matrix) """ for jj in range(len(dogsigmaArr)): dogsigma = dogsigmaArr[jj]; print "Start DoG Sigma on " + str(dogsigma); # Generate dog kernels dogkercc = doggen([dogsigma, dogsigma, dogsigma]); dogkercc = np.transpose(dogkercc, (0, 2, 1)); # annoying # print dogkercc.shape; # print dogkercc[:, :, 0]; dogkerrr =
np.transpose(dogkercc, (1, 0, 2))
numpy.transpose
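# A minimal sketch of the completion above: the row-direction kernel is the column kernel
# with its first two axes swapped, which is exactly what the (1, 0, 2) permutation does.
import numpy as np
k = np.arange(24).reshape(2, 3, 4)
k_rr = np.transpose(k, (1, 0, 2))                  # shape (2, 3, 4) -> (3, 2, 4)
assert k_rr.shape == (3, 2, 4)
assert k_rr[2, 1, 0] == k[1, 2, 0]                 # element (i, j, l) moves to (j, i, l)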
""" Modified from matterport/Mask_RCNN/samples/shapes/ shapes.py train_shapes.ipynb """ import os import sys import math import random import numpy as np import cv2 import matplotlib import matplotlib.pyplot as plt import json DATASET_PATH = r"dataset\coco2014" CLASS_NAME = r"cat" # Root directory of the project ROOT_DIR = os.path.abspath("") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn.config import Config from mrcnn import utils import mrcnn.model as modellib from mrcnn import visualize from mrcnn.model import log # Directory to save logs and trained model MODEL_DIR = os.path.join(ROOT_DIR, "logs") # Local path to trained weights file COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5") # Download COCO trained weights from Releases if needed if not os.path.exists(COCO_MODEL_PATH): utils.download_trained_weights(COCO_MODEL_PATH) class CustomConfig(Config): """Configuration for training on the toy shapes dataset. Derives from the base Config class and overrides values specific to the toy shapes dataset. """ # Give the configuration a recognizable name NAME = CLASS_NAME # Train on 1 GPU and 8 images per GPU. We can put multiple images on each # GPU because the images are small. Batch size is 8 (GPUs * images/GPU). GPU_COUNT = 1 IMAGES_PER_GPU = 4 # 8 # Number of classes (including background) NUM_CLASSES = 1 + 1 # background + cat # Use small images for faster training. Set the limits of the small side # the large side, and that determines the image shape. IMAGE_MIN_DIM = 256 IMAGE_MAX_DIM = 256 # Use smaller anchors because our image and objects are small RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels # Reduce training ROIs per image because the images are small and have # few objects. Aim to allow ROI sampling to pick 33% positive ROIs. TRAIN_ROIS_PER_IMAGE = 32 # Use a small epoch since the data is simple STEPS_PER_EPOCH = 10 # use small validation steps since the epoch is small VALIDATION_STEPS = 5 class InferenceConfig(CustomConfig): GPU_COUNT = 1 IMAGES_PER_GPU = 1 class CustomDataset(utils.Dataset): """Generates the shapes synthetic dataset. The dataset consists of simple shapes (triangles, squares, circles) placed randomly on a blank surface. The images are generated on the fly. No file access required. """ def load_data(self, dataset_dir, subset): """Load a subset of the nuclei dataset. dataset_dir: Root directory of the dataset subset: Subset to load. Either the name of the sub-directory, such as stage1_train, stage1_test, ...etc. or, one of: * train: stage1_train excluding validation images * val: validation images from VAL_IMAGE_IDS """ # Add classes. We have one class. # Naming the dataset nucleus, and the class nucleus self.add_class(CLASS_NAME, 1, CLASS_NAME) # Which subset? 
# "val": use hard-coded list above # "train": use data from stage1_train minus the hard-coded list above # else: use the data from the specified sub-directory dataset_dir = os.path.join(dataset_dir, subset) # Get image ids from directory names file_list = [v for v in os.listdir(dataset_dir) if ".jpg" in v] image_ids = list(range(len(file_list))) print(image_ids) # Add images for image_id in image_ids: self.add_image("cat", image_id=image_id, path=os.path.join(dataset_dir, file_list[image_id])) def image_reference(self, image_id): """Return the shapes data of the image.""" info = self.image_info[image_id] if info["source"] == "shapes": return info["shapes"] else: super(self.__class__).image_reference(self, image_id) def load_mask(self, image_id): """Generate instance masks for shapes of the given image ID. """ info = self.image_info[image_id] json_data = str(info['path']).replace(".jpg", ".json") fin = open(json_data, 'r') json_dict = json.load(fin) h, w = json_dict['imageHeight'], json_dict['imageWidth'] masks = [] for shape_data in json_dict["shapes"]: mask =
np.zeros([h, w], np.uint8)
numpy.zeros
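# A minimal sketch of the completion above: each labelled polygon gets its own blank
# uint8 mask; rasterising the polygon (cv2.fillPoly in the full code, assumed here) then
# sets the interior pixels to 1 before the masks are stacked.
import numpy as np
h, w = 4, 6
mask = np.zeros([h, w], np.uint8)
mask[1:3, 2:5] = 1                                 # stand-in for the filled polygon
assert mask.dtype == np.uint8 and mask.sum() == 6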
import cv2 import numpy as np from aip import AipOcr from PIL import Image, ImageDraw, ImageFont import os, math def crop_image(src_img, x_start, x_end, y_start, y_end): """ Crop an image. :param src_img: source image :param x_start: x start coordinate :param x_end: x end coordinate :param y_start: y start coordinate :param y_end: y end coordinate :return: """ tmp_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB) tmp_img = tmp_img[y_start:y_end, x_start:x_end] # height, width return cv2.cvtColor(tmp_img, cv2.COLOR_RGB2BGR) def adjust_lightness(src_img, lightness_value): """ :param src_img: image whose brightness is adjusted :param lightness_value: brightness value :return: """ height, width, channel = src_img.shape # get the shape values: height, width and channels # create an all-zero array src2 with the same height, width and dtype as the source (all pixels zero, i.e. a black image) src2 = np.zeros([height, width, channel], src_img.dtype) # new_img = cv2.addWeighted(src_img, a, src2, 1 - a, lightnessValue) # processed image new_img = cv2.addWeighted(src_img, 1, src2, 1, lightness_value) # processed image return new_img def add_watermark(src_img, water_text, position, color): """ Add a watermark. :param src_img: source image :param water_text: watermark text :param position: watermark position :param color: watermark text color :return: """ # determine the watermark's start position from the selected anchor height, width, channel = src_img.shape x_padding, y_padding = width * 0.05, height * 0.05 # margin from the edges scale = min((width / 1000), (height / 1000)) # scale the font with the image size; scale is the magnification factor font_size = 20 + int(scale) * 5 # grow the font size with scale so it matches the image size font_path = "{0}/ui/font.ttf".format(os.getcwd()) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") # load the custom font (text_width, text_height) = font.getsize(water_text) x_start, y_start = 0, 0 # bottom-left coordinates of the watermark text if position == "左上角": x_start = x_padding y_start = y_padding elif position == "右上角": x_start = width - text_width - x_padding y_start = y_padding elif position == "中间": x_start = (width - text_width) / 2 y_start = (height - text_height) / 2 elif position == "左下角": x_start = x_padding y_start = height - y_padding - text_height elif position == "右下角": x_start = width - text_width - x_padding y_start = height - y_padding - text_height img_pil = Image.fromarray(cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB)) # convert OpenCV BGR to the RGB colours PIL expects draw = ImageDraw.Draw(img_pil) draw.text((x_start, y_start), water_text, color, font=font) return cv2.cvtColor(np.asarray(img_pil), cv2.COLOR_RGB2BGR) # convert PIL RGB back to OpenCV BGR def gaussian_blur(src_img, x_start, x_end, y_start, y_end, ksize, sigmaX): """ Gaussian blur. """ blur = src_img[y_start:y_end, x_start:x_end] blur = cv2.GaussianBlur(blur, ksize, sigmaX) src_img[y_start:y_end, x_start:x_end] = blur return src_img def compress_img(src_img, size): """ Resize the image to the given size. """ return cv2.resize(src_img, size, interpolation=cv2.INTER_AREA) def img_stitching(images): """ Stitch images together. """ stitcher = cv2.Stitcher_create() status, stitch_img = stitcher.stitch(images) if status != cv2.Stitcher_OK: print(f"Image stitching failed, status = {status}") return stitch_img def img_encoding(image, dir_path): """ Encrypt an image. :return: """ height, width, channel = image.shape # create a random key img_key = np.random.randint(0, 256, size=[height, width, channel], dtype=np.uint8) # save the key np.save(dir_path + "/" + "img_key2", img_key) # return the encrypted image return cv2.bitwise_xor(image, img_key) def img_decoding(image, key_file_path): """ Decrypt an image. """ img_key =
np.load(key_file_path)
numpy.load
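# A minimal sketch of the completion above (the file name is a placeholder): decryption
# reloads the key saved by np.save and XORs it against the encrypted image, which is the
# inverse of the bitwise_xor used in img_encoding.
import numpy as np
img = np.random.randint(0, 256, size=[2, 2, 3], dtype=np.uint8)
img_key = np.random.randint(0, 256, size=img.shape, dtype=np.uint8)
np.save("img_key2.npy", img_key)
encoded = np.bitwise_xor(img, img_key)
decoded = np.bitwise_xor(encoded, np.load("img_key2.npy"))
assert np.array_equal(decoded, img)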
''' <NAME> <EMAIL> November 4, 2017 ''' import sys import random import numpy as np import scipy as sc import matplotlib as mlp import matplotlib.pyplot as plt from matplotlib import rc from scipy import special from scipy import stats def particleFilter(nsteps, a, b, c, d): ''' Completes a simple 1D linear Guassian particle filter xt = ax + N(0,b); yt = cxt + N(0,d); Args: nsteps (int) = number of steps to sample a, b, c, d (float) = linear guassian parameters Returns: s (np.array) = 2xD array of particle samples for x_t and y_t ''' s = np.zeros((2,nsteps)) s[0,0] = np.random.normal(0,1.0) for i in range(1,nsteps): #P(x_t|x_t-1) s[0,i] = np.random.normal(a*s[0,i-1], np.sqrt(b)) #P(y_t|x_t) s[1,i] = np.random.normal(c*s[0,i], np.sqrt(d)) return s def kalmanFilter(nstep, Y, a, b, c, d): ''' Completes a simple Kalman filter for 1D linear state-space xt = ax + N(0,b); yt = cxt + N(0,d); Args: nstep (int) = number of steps to sample Y (np.array) = 1xN set of observation data a, b, c, d (float) = linear guassian parameters Returns: ''' mu = np.zeros(nstep) var = np.zeros(nstep) mu[0] = 0 var[0] = 1.0 for i in range(1,nstep): #Covar_{t|t-1} phat = a*var[i-1]*a + b k = phat*c/(c*phat*c + d) #Covar_{t|t} var[i] = phat - k*c*phat #mu_{t|t} mu[i] = a*mu[i-1] + k*(Y[i] - c*a*mu[i-1]) return mu, var def gaussian1D(x, mu, var): """ Calculates 1D gaussian density Args: x (flost) = point of interest mu (float) = mean var (float) = Variance squared """ small = 1e-8 e = (x-mu)*(1/(var+small)) e = e*(x-mu) e = np.exp(-0.5*e) return 1.0/(np.power(2*np.pi*(var+small),0.5))*e def bootStrapFilter(y, nsteps, N, a, b, c, d, ess, resamp = 'standard'): ''' Executes bootstrap filter Args: y (nd.array) = [D] array of observation data nsteps (int) = number of timesteps N (int) = number of particles a, b, c, d (float) = linear guassian parameters ess (float) = ESS trigger (set to number of particles if you want to resample every timestep) resamp (string) = resampling method (standard, systematic) Returns: x (nd.array) = [nsteps, D] array of states w_hist (nd.array) = [nsteps, D] array of filtering distributions g(y|x) ''' small = 1e-8 x = np.zeros((nsteps, N)) + small w_log = np.zeros((nsteps, N)) w = np.zeros((nsteps, N)) w_hist = np.zeros((nsteps, N)) #Initialize x, weights, log-weights x[0,:] = np.random.normal(0, 1, N) w[0,:] = 1.0/N + np.zeros((N)) w_log[0,:] = np.log(w[0,:]) w_hist[0,:] = w[0,:] #Iterate over timesteps for i in range(1,nsteps): #First, sample particles for states x[i,:] = np.random.normal(a*x[i-1,:], np.sqrt(b), N) #Second update the importance weights w_hist[i,:] = gaussian1D(y[i], c*x[i,:], d) w_log[i,:] = w_log[i-1,:] + np.log(w_hist[i,:] + small) w_log[i,:] = w_log[i,:] - np.max(w_log[i,:]) w[i,:] = np.exp(w_log[i,:])/np.sum(np.exp(w_log[i,:])) #Calculate Kish's effective sample size neff = 1.0/np.sum(np.power(w[i,:],2)) #ESS trigger if(neff < ess): #Third resample the points if(resamp == 'systematic'): ind = resampleSystematic(w[i,:],N) else: #Standard resampling ind = resample(w[i,:],N) x = np.take(x, ind, 1) w[i,:] = 1.0/N + np.zeros((N)) w_log[i,:] = np.log(w[i,:]) return x, w_hist def auxFilter(y, nsteps, N, a, b, c, d, ess = float("inf"), resamp = 'standard'): ''' Executes fully adapted auxilary particle filter Args: y (nd.array) = [D] array of observation data nsteps (int) = number of timesteps N (int) = number of particles a, b, c, d (float) = linear guassian parameters Returns: x (nd.array) = [nsteps, D] array of states w_hist (nd.array) = [nsteps, D] array of filtering distributions 
g(y|x) ''' small = 1e-5 x = np.zeros((nsteps, N)) + small w_log = np.zeros((nsteps, N)) w =
np.zeros((nsteps, N))
numpy.zeros
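# A minimal sketch of the completion above: the filter pre-allocates one row of particle
# weights per time step and then overwrites row 0 with uniform weights, mirroring the
# initialisation in bootStrapFilter earlier in this row.
import numpy as np
nsteps, N = 5, 100
w = np.zeros((nsteps, N))
w[0, :] = 1.0 / N                                  # uniform initial importance weights
assert np.isclose(w[0].sum(), 1.0) and w[1:].sum() == 0.0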
import numpy as np import pandas as pd import csv from sklearn.datasets.base import Bunch from sklearn import datasets from sklearn.preprocessing import StandardScaler from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import ShuffleSplit from sklearn.metrics import r2_score from collections import defaultdict X = [[0, 15], [1, -10]] print(StandardScaler().fit(X).transform(X)) iris_data_set = datasets.load_iris() x = iris_data_set.data y = iris_data_set.target print(iris_data_set['feature_names']) # print(x, y) def load_my_fancy_dataset(): with open('ds1.csv') as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype=np.int) feature_names = temp[2:] for i, sample in enumerate(data_file): data[i] = np.asarray(sample[:-1], dtype=np.float64) target[i] =
np.asarray(sample[-1], dtype=np.int)
numpy.asarray
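# A minimal sketch of the completion above: each csv.reader row is a list of strings, so
# the label column is cast to an integer. np.int was an alias for the builtin int and is
# removed in NumPy >= 1.24, so plain int is used in this sketch.
import numpy as np
sample = ["5.1", "3.5", "1.4", "0"]                # stand-in for one parsed CSV row
features = np.asarray(sample[:-1], dtype=np.float64)
target = np.asarray(sample[-1], dtype=int)
assert features.shape == (3,) and target == 0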
"""EBLUP for the unit level model. This module implements the basic EBLUP unit level model. The functionalities are organized in classes. Each class has three main methods: *fit()*, *predict()* and *bootstrap_mse()*. Linear Mixed Models (LMM) are the core underlying statistical framework used to model the hierarchical nature of the small area estimation (SAE) techniques implemented in this module, see <NAME>. and <NAME>. (2001) [#ms2001]_ for more details on LMM. The *EblupUnitModel* class implements the model developed by <NAME>., <NAME>., and <NAME>. (1988) [#bhf1988]_. The model parameters can fitted using restricted maximum likelihood (REML) and maximum likelihood (ML). The normality assumption of the errors is not necessary to predict the point estimates but is required for the taylor MSE estimation. The predictions takes into account sampling rates. A bootstrap MSE estimation method is also implemted for this class. For a comprehensive review of the small area estimation models and its applications, see <NAME>. and <NAME>. (2015) [#rm2015]_. .. [#ms2001] <NAME>.and <NAME>. (2001), *Generalized, Linear, Mixed Models*, New York: John Wiley & Sons, Inc. .. [#bhf1988] <NAME>., <NAME>., and <NAME>. (1988). An error-components model for prediction of county crop areas using survey and satellite data, *Journal of the American Statistical Association*, **83**, 28-36. .. [#rm2015] <NAME>. and <NAME>. (2015), *Small area estimation, 2nd edn.*, <NAME> & Sons, Hoboken, New Jersey. """ from __future__ import annotations import warnings from typing import Any, Optional, Union import numpy as np import pandas as pd import statsmodels.api as sm from samplics.sae.sae_core_functions import area_stats from samplics.utils.basic_functions import sumby from samplics.utils.formats import numpy_array, dict_to_dataframe from samplics.utils.types import Array, DictStrNum, Number, StringNumber class EblupUnitModel: """*EblupUnitModel* implements the basic unit level model for means (a linear indicator). *EblupUnitModel* takes the sample data as input and fits the basic linear mixed model. The user can pick between restricted maximum likelihood (REML) or maximum likelihood (ML) to fit the model parameters. Also, EblupUnitModel predicts the areas means and provides the point and mean squared error (MSE) estimates of the empirical Bayes linear unbiased (EBLUP). User can also obtain the bootstrap mse estimates of the MSE. Setting attributes | method (str): the fitting method of the model parameters which can take the possible | values restricted maximum likelihood (REML) or maximum likelihood (ML). | If not specified, "REML" is used as default. Sample related attributes | ys (array): the output sample observations. | Xs (ndarray): the auxiliary information. | scales (array): an array of scaling parameters for the unit levels errors. | afactors (array): sum of the inverse squared of scale. | areas (array): the full vector of small areas from the sampled observations. | areas_list (array): the list of small areas from the sample data. | samp_size (dict): the sample size per small areas from the sample. | ys_mean (array): sample area means of the output variable. | Xs_mean (ndarray): sample area means of the auxiliary variables.}}}}} Model fitting attributes | fitted (boolean): indicates whether the model has been fitted or not. | fixed_effects (array): the estimated fixed effects of the regression model. | fe_std (array): the estimated standard errors of the fixed effects. 
| random_effects (array): the estimated area level random effects. | re_std (number): the estimated standard error of the random effects. | error_std (number): the estimated standard error of the unit level residuals. | convergence (dict): a dictionnary holding the convergence status and the number of | iterations from the model fitting algorithm. | goodness (dict): a dictionary holding the log-likelihood, AIC, and BIC. | gamma (dict): ratio of the between-area variability (re_std**2) to the total | variability (re_std**2 + error_std**2 / a_factor). Prediction related attributes | areap (array): the list of areas for the prediction. | Xmean (array): population means of the auxiliary variables. | number_reps (int): number of replicates for the bootstrap MSE estimation. | samp_rate (dict): sampling rates at the area level. | area_est (array): area level EBLUP estimates. | area_mse (array): area level taylor estimation of the MSE. | area_mse_boot (array): area level bootstrap estimation of the MSE. Main methods | fit(): fits the linear mixed model to estimate the model parameters using REMl or ML | methods. | predict(): predicts the area level mean estimates which includes both the point | estimates and the taylor MSE estimate. | bootstrap_mse(): computes the area level bootstrap MSE estimates of the mean. """ def __init__( self, method: str = "REML", ): # Setting self.method: str = method.upper() if self.method not in ("REML", "ML"): raise AssertionError("Value provided for method is not valid!") # Sample data self.scales: np.ndarray self.afactors: DictStrNum self.ys: np.ndarray self.Xs: np.ndarray self.areas: np.ndarray self.areas_list: np.ndarray self.samp_size: DictStrNum self.ys_mean: np.ndarray self.Xs_mean: np.ndarray # Fitting stats self.fitted: bool = False self.fixed_effects: np.ndarray self.fe_std: np.ndarray self.random_effects: np.ndarray self.re_std: Number = 0 self.error_std: Number = 0 self.convergence: dict[str, Union[float, int, bool]] = {} self.goodness: dict[str, Number] = {} # loglikehood, deviance, AIC, BIC self.gamma: DictStrNum # Predict(ion/ed) data self.areap: np.ndarray self.Xp_mean: np.ndarray self.number_reps: int = 0 self.samp_rate: DictStrNum self.area_est: DictStrNum self.area_mse: DictStrNum self.area_mse_boot: Optional[DictStrNum] = None def _beta( self, y: np.ndarray, X: np.ndarray, area: np.ndarray, weight: np.ndarray, ) -> np.ndarray: Xw = X * weight[:, None] p = X.shape[1] beta1 = np.zeros((p, p)) beta2 = np.zeros(p) for d in np.unique(area): aread = area == d w_d = weight[aread] y_d = y[aread] X_d = X[aread] Xw_d = Xw[aread] Xw_d_bar = np.sum(Xw_d, axis=0) / np.sum(w_d) resid_d_w = X_d - Xw_d_bar * self.gamma[d] beta1 = beta1 + np.matmul(np.transpose(Xw_d), resid_d_w) beta2 = beta2 + np.sum(resid_d_w * y_d[:, None] * w_d[:, None], axis=0) return np.asarray(np.matmul(np.linalg.inv(beta1), beta2)) def _mse( self, areas: np.ndarray, Xs_mean: np.ndarray, Xp_mean: np.ndarray, gamma: np.ndarray, samp_size: np.ndarray, afactor: np.ndarray, A_inv: np.ndarray, ) -> np.ndarray: sigma2e = self.error_std ** 2 sigma2u = self.re_std ** 2 g1 = gamma * sigma2e / afactor xbar_diff = Xp_mean - gamma[:, None] * Xs_mean g2_matrix = xbar_diff @ A_inv @
np.transpose(xbar_diff)
numpy.transpose
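# A minimal sketch of the completion above (numbers are made up): np.transpose closes the
# quadratic form (Xp - gamma*Xs) A^{-1} (Xp - gamma*Xs)^T, whose diagonal holds the g2
# MSE component for each small area.
import numpy as np
xbar_diff = np.array([[0.2, -0.1], [0.05, 0.3]])   # nareas x nfeatures
A_inv = np.linalg.inv(np.array([[2.0, 0.3], [0.3, 1.5]]))
g2_matrix = xbar_diff @ A_inv @ np.transpose(xbar_diff)
g2 = np.diag(g2_matrix)                            # one g2 value per area
assert g2.shape == (2,) and np.all(g2 > 0)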
from __future__ import print_function from __future__ import division import numpy as np import csv from scipy.ndimage.filters import gaussian_filter1d import os import file_namer import spectrum_functions as specfun import sys from scipy import stats def open_csv(filename, mode='r'): """Open a csv file in proper mode depending on Python version.""" return(open(filename, mode=mode+'b') if sys.version_info[0] == 2 else open(filename, mode=mode, newline='')) def ImportCSV(filename): x = np.genfromtxt(filename, delimiter = '\t', dtype = None, skip_header = 1) return x class Spectrum(object): def __init__(self, intensity, units, SpectrumRange=None): self.intensity = intensity self.units = units self.length = len(self.intensity) self.SpectrumRange = SpectrumRange def SaveSpectrumAsCSV(self,filename): filename = file_namer.name_file(filename) ExportSpectrumRange = np.copy(self.SpectrumRange) ExportIntensity = np.copy(self.intensity) ExportSpectrumRange.resize(len(ExportSpectrumRange), 1) ExportIntensity.resize(len(ExportIntensity), 1) ExportData = np.append(ExportSpectrumRange, ExportIntensity, axis = 1) ExportHeaders = [ (self.unit_label + ' (' + self.units + ')'), 'Intensity'] with open_csv(filename, 'w') as csvfile: writer = csv.writer(csvfile, delimiter = '\t') writer.writerow(ExportHeaders) writer.writerows(ExportData) print('Saved file...', filename) def SmoothingFilter1D(self, sigma=2): kernel = np.array([1, 1, 2, 1, 1])/6. intensity = np.append(self.intensity[4::-1], np.append(self.intensity, self.intensity[-5::])) smoothed = np.convolve(intensity, kernel, mode='same') smoothed = gaussian_filter1d(self.intensity, sigma) # smoothed[self.intensity > (0.01*np.max(self.intensity))] = self.intensity[self.intensity > (0.01*np.max(self.intensity))] return smoothed class CLSpectrum(Spectrum): def __init__(self, intensity, WavelengthRange, units='nm'): super(CLSpectrum, self).__init__(intensity, units) self.SpectrumRange = WavelengthRange self.unit_label = 'Wavelength' self.secondary_units = 'eV' self.secondary_unit_label = 'Energy' @classmethod def LoadFromCSV(cls, filename): spectrum = ImportCSV(filename) return cls(intensity=spectrum[:, 1], WavelengthRange=spectrum[:, 0], units='nm') class EELSSpectrum(Spectrum): def __init__(self, intensity, SpectrumRange=None, channel_eV = None, dispersion=0.005, ZLP=None, units='eV'): '''intensity: 1D array SpectrumRange: 1D array channel_eV: 2 element array [channel #, eV value] dispersion: float, width of each channel, must be provided if SpectrumRange is not, default is 5meV ZLP: Boolean - True=ZLP is present units: string, for plot axis ''' super(EELSSpectrum, self).__init__(intensity, units) if (SpectrumRange is not None): if (len(intensity) != len(SpectrumRange)): raise ValueError('intensity and SpectrumRange are not the same length!') if SpectrumRange is not None: self.dispersion = SpectrumRange[1] - SpectrumRange[0] else: self.dispersion = dispersion if ZLP: self.ZLP = self.FindZLP(self.intensity) if SpectrumRange is not None: self.SpectrumRange = SpectrumRange else: self.SpectrumRange = np.arange(0 - self.ZLP, self.length - self.ZLP) * self.dispersion else: if SpectrumRange is not None: self.SpectrumRange = SpectrumRange elif channel_eV is not None: if len(channel_eV) == 2: eV0 = channel_eV[1] - channel_eV[0] * dispersion self.SpectrumRange = np.linspace( eV0, eV0 + self.length * dispersion, self.length ) else: raise ValueError('You need to define the channel and the energy!') else: raise ValueError('You need to input the energy range!') self.ZLP = 
int(round(0 - np.min(SpectrumRange)/self.dispersion)) self.unit_label = 'Energy' self.secondary_units = 'nm' self.secondary_unit_label = 'Wavelength' @classmethod def LoadFromCSV(cls, filename): spectrum = ImportCSV(filename) return cls(intensity=spectrum[:, 1], SpectrumRange=spectrum[:,0], dispersion=spectrum[1,0]-spectrum[0,0], units='eV') def FindZLP(self, data): ZLP = int(stats.mode(np.argmax(data, axis = -1), axis=None)[0]) return ZLP def Normalize(self, ind=None): '''Normalize data to integral''' data_norm = specfun.normalize(self.intensity, ind) return EELSSpectrum(data_norm, SpectrumRange=self.SpectrumRange, dispersion=self.dispersion, ZLP=self.ZLP, units=self.units) def SymmetrizeAroundZLP(self): if self.ZLP < (self.length-1)/2.: data_sym = np.delete(self.intensity, np.s_[(2*self.ZLP+1):self.length], axis = -1) range_sym = np.delete(self.SpectrumRange, np.s_[(2*self.ZLP+1):self.length], axis = -1) elif self.ZLP > (self.length-1)/2.: data_sym = np.delete(self.intensity, np.s_[:np.maximum(2*self.ZLP+1-self.length, 0)], axis = -1) range_sym = np.delete(self.SpectrumRange, np.s_[:np.maximum(2*self.ZLP+1-self.length, 0)], axis = -1) else: data_sym = self.intensity range_sym = self.SpectrumRange data_sym[data_sym<0] = 0 return EELSSpectrum(data_sym, SpectrumRange=range_sym, dispersion=self.dispersion, ZLP=self.ZLP, units=self.units) def PadSpectrum(self, pad_length, pad_value=0, pad_side='left'): if pad_side == 'left': padded = np.append(np.ones((pad_length, )) * pad_value, self.intensity) padded_range = np.append( np.linspace( self.SpectrumRange[0] - pad_length * self.dispersion, self.SpectrumRange[0] - self.dispersion, pad_length), self.SpectrumRange) elif pad_side == 'right': padded = np.append(self.intensity, np.ones((pad_length, 1)) * pad_value) padded_range = np.append( self.SpectrumRange, np.linspace( self.SpectrumRange[-1] + self.dispersion, self.SpectrumRange[-1] + pad_length * self.dispersion, pad_length) ) else: padded = np.append( np.append( np.ones((pad_length, 1)) * pad_value, self.intensity), np.ones((pad_length, 1)) * pad_value) padded_range = np.append( np.append( np.linspace( self.SpectrumRange[0] - pad_length * self.dispersion, self.SpectrumRange[0] - self.dispersion, pad_length), self.SpectrumRange), np.linspace( self.SpectrumRange[-1] + self.dispersion, self.SpectrumRange[-1] + pad_length * self.dispersion, pad_length) ) return EELSSpectrum(padded, SpectrumRange=padded_range, dispersion=self.dispersion, ZLP=self.ZLP, units=self.units) def FindFW(self, intensityfraction): FW = specfun.find_fw(self.intensity, self.dispersion, self.ZLP, intensityfraction) return FW def RL_PSFsym(self, PSF, PSF_pad=0): PSF_sym = PSF.SymmetrizeAroundZLP() if PSF_pad is not None: data_length = np.size(self.SpectrumRange) PSF_length = np.size(PSF_sym.intensity) pad_length = int(data_length/2 - (1 + data_length) % 2 - (PSF_length-(PSF_length % 2))/2) if PSF_sym.ZLP < data_length/2: PSF_sym = PSF.PadSpectrum(pad_length, pad_value=PSF_pad, pad_side='left').SymmetrizeAroundZLP() elif PSF_sym.ZLP > data_length/2: PSF_sym = PSF_sym.PadSpectrum(pad_length, pad_value=PSF_pad, pad_side='right') return PSF_sym def RLDeconvolution(self, RLiterations, PSF): ''' Input: RLiterations=number of iterations to perform PSF=point spread function (an EELS spectrum object) ''' print('Beginning deconvolution...') x_deconv = RL(RLiterations, PSF.intensity, self.intensity) print('Done %s iterations!' 
%RLiterations) return EELSSpectrum(x_deconv, SpectrumRange=self.SpectrumRange, dispersion=self.dispersion, units=self.units) def eVSlice(self, starteV, stopeV): sliced = specfun.slice_range( self.intensity, [starteV, stopeV], self.SpectrumRange) return sliced #Richardson-Lucy algorithm def RL(iterations, PSF, Spec): RL4 = Spec.copy() for ii in range(iterations): RL1 =
np.convolve(PSF, RL4, 'same')
numpy.convolve
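# A minimal sketch of the completion above: np.convolve(PSF, RL4, 'same') is the forward
# blur of a Richardson-Lucy iteration; a full update with the mirrored-PSF correction
# (assumed here, matching the textbook algorithm) looks like this.
import numpy as np
def rl_step(PSF, Spec, est, eps=1e-12):
    blurred = np.convolve(PSF, est, 'same')        # forward model: blur current estimate
    ratio = Spec / np.maximum(blurred, eps)        # data / model, guarded against zeros
    return est * np.convolve(PSF[::-1], ratio, 'same')
PSF = np.array([0.25, 0.5, 0.25])
Spec = np.array([0.0, 0.2, 0.6, 0.2, 0.0])
est = rl_step(PSF, Spec, Spec.copy())
assert est.shape == Spec.shape and np.all(est >= 0)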
############################################################## # Likelihood for the KiDS+VIKING-450 correlation functions # ############################################################## # # Originally set up by <NAME> and <NAME> # for CFHTLenS data, by adapting <NAME>'s Monte Python # likelihood euclid_lensing and <NAME>'s CosmoMC likelihood # for weak lensing (adapted itself from JL's CosmoMC likelihood # for the COSMOS). # # Adjusted for KV450 correlation function data from Hildebrandt # et al. 2018 (arXiv:1812.06076) by <NAME> and Hendrik # Hildebrandt. # # Adjusted for consistency test purpose by <NAME> # # Data available from: # # http://kids.strw.leidenuniv.nl/sciencedata.php # # ATTENTION: # This likelihood only produces valid results for \Omega_k = 0, # i.e. flat cosmologies! ############################################################## from __future__ import print_function import sys from montepython.likelihood_class import Likelihood import io_mp #import scipy.integrate from scipy import interpolate as itp from scipy import special from scipy.linalg import cholesky, solve_triangular import os import numpy as np import math #from timeit import default_timer as timer try: xrange except NameError: xrange = range class kv450_joint_fix_cosmo_H0(Likelihood): def __init__(self, path, data, command_line): Likelihood.__init__(self, path, data, command_line) # for loading of Nz-files: self.z_bins_min = [0.1, 0.3, 0.5, 0.7, 0.9] self.z_bins_max = [0.3, 0.5, 0.7, 0.9, 1.2] # number of angular bins in which xipm is measured # we always load the full data vector with 9 data points for xi_p and # xi_m each; they are cut to the fiducial scales (or any arbitrarily # defined scales with the 'cut_values.dat' files! self.ntheta = 9 # Force the cosmological module to store Pk for redshifts up to # max(self.z) and for k up to k_max self.need_cosmo_arguments(data, {'output': 'mPk'}) self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': self.k_max_h_by_Mpc}) ## Compute non-linear power spectrum if requested #if (self.use_halofit): # it seems like HMcode needs the full argument to work... if self.method_non_linear_Pk in ['halofit', 'HALOFIT', 'Halofit', 'hmcode', 'Hmcode', 'HMcode', 'HMCODE']: self.need_cosmo_arguments(data, {'non linear': self.method_non_linear_Pk}) print('Using {:} to obtain the non-linear corrections for the matter power spectrum, P(k, z)! \n'.format(self.method_non_linear_Pk)) else: print('Only using the linear P(k, z) for ALL calculations \n (check keywords for "method_non_linear_Pk"). \n') # TODO: move min_kmax_hmc to data-file?! # might not be really necessary; I didn't see a difference in the P(k, z) ratios between # HMcode complaining about k_max being too low and not complaining at all... if self.method_non_linear_Pk in ['hmcode', 'Hmcode', 'HMcode', 'HMCODE']: #self.need_cosmo_arguments(data, {'hmcode_min_k_max': 1000.}) min_kmax_hmc = 170. if self.k_max_h_by_Mpc < min_kmax_hmc: self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': min_kmax_hmc}) #print("Your choice of k_max_h_by_Mpc is too small for HMcode. \n Requested P_k_max_h/Mpc now up to k = {:.2f} h/Mpc \n This does NOT influence the scale above".format(min_kmax_hmc)) # This is for Cl-integration only! 
# Define array of l values, and initialize them # It is a logspace # find nlmax in order to reach lmax with logarithmic steps dlnl self.nlmax = np.int(np.log(self.lmax) / self.dlnl) + 1 # redefine slightly dlnl so that the last point is always exactly lmax self.dlnl = np.log(self.lmax) / (self.nlmax - 1) self.l = np.exp(self.dlnl * np.arange(self.nlmax)) #print(self.l.min(), self.l.max(), self.l.shape) self.nzbins = len(self.z_bins_min) self.nzcorrs = self.nzbins * (self.nzbins + 1) / 2 # Create labels for loading of dn/dz-files: self.zbin_labels = [] for i in xrange(self.nzbins): self.zbin_labels += ['{:.1f}t{:.1f}'.format(self.z_bins_min[i], self.z_bins_max[i])] # read in public data vector: # sub-sample 1 temp1 = self.__load_public_data_vector(id_sample='1') theta_bins_1 = temp1[:, 0] if (np.sum( (theta_bins_1[:self.ntheta] - theta_bins_1[self.ntheta:])**2) > 1e-6): raise io_mp.LikelihoodError( 'The angular values at which xi_p and xi_m ' 'are observed do not match.') # sub-sample 2 temp2 = self.__load_public_data_vector(id_sample='2') theta_bins_2 = temp2[:, 0] if (np.sum( (theta_bins_2[:self.ntheta] - theta_bins_2[self.ntheta:])**2) > 1e-6): raise io_mp.LikelihoodError( 'The angular values at which xi_p and xi_m ' 'are observed do not match.') # we assume theta_bins1 ~ theta_bins2 # so we smear the slightly difference by average self.theta_bins = (theta_bins_1 + theta_bins_2) / 2. # create the data-vector: # xi_obs = {xi1(theta1, z_11)...xi1(theta_k, z_11), xi2(theta_1, z_11)... # xi2(theta_k, z_11);...; xi1(theta1, z_nn)...xi1(theta_k, z_nn), # xi2(theta_1, z_nn)... xi2(theta_k, z_nn)} self.xi_obs_1 = self.__get_xi_obs(temp1[:, 1:]) self.xi_obs_2 = self.__get_xi_obs(temp2[:, 1:]) # load the full covariance matrix: covmat11 = self.__load_public_cov_mat(id_cov='11') covmat12 = self.__load_public_cov_mat(id_cov='12') covmat22 = self.__load_public_cov_mat(id_cov='22') # covmat21 = covmat12.transpose() # build a combined cov-mat, for that to work we assume, that the cov-mat dimension fits # to the size of the *uncut*, single data-vector and is ordered in the same way as the # *final* data-vector created here (i.e. vec = [xi+(1,1), xi-(1,1), xi+(1,2), xi-(1,2),...]! covmat = np.asarray(np.bmat('covmat11, covmat12; covmat21, covmat22')) # Read angular cut values if self.use_cut_theta: cut_values = np.zeros((self.nzbins, 2)) cutvalues_file_path = os.path.join(self.data_directory, 'KV450_COSMIC_SHEAR_DATA_RELEASE/SUPPLEMENTARY_FILES/CUT_VALUES/' + self.cutvalues_file) if os.path.exists(cutvalues_file_path): cut_values = np.loadtxt(cutvalues_file_path) else: raise io_mp.LikelihoodError('File not found:\n {:} \n Check that requested file exists in the following folder: \n {:}'.format(cutvalues_file_path, self.data_directory + 'KV450_COSMIC_SHEAR_DATA_RELEASE/SUPPLEMENTARY_FILES/CUT_VALUES/')) # Compute theta mask if self.use_cut_theta: mask = self.__get_mask(cut_values) else: mask = np.ones(2 * self.nzcorrs * self.ntheta) # combine two vectors' mask mask = np.concatenate((mask, mask)) self.mask_indices = np.where(mask == 1)[0] # apply mask also to covariance matrix covmat = covmat[np.ix_(self.mask_indices, self.mask_indices)] # precompute Cholesky transform for chi^2 calculation: self.cholesky_transform = cholesky(covmat, lower=True) # load theta-dependent c-term function if requested # file is assumed to contain values for the same theta values as used # for xi_pm! 
if self.use_cterm_function: fname = os.path.join(self.data_directory, 'KV450_COSMIC_SHEAR_DATA_RELEASE/SUPPLEMENTARY_FILES/KV450_xi_pm_c_term.dat') # function is measured over same theta scales as xip, xim self.xip_c_per_zbin, self.xim_c_per_zbin = np.loadtxt(fname, usecols=(3, 4), unpack=True) print('Loaded (angular) scale-dependent c-term function from: \n', fname, '\n') #print(self.xip_c_per_zbin.shape) ##################################################################### # read redshift distribution ##################################################################### # Read fiducial dn_dz from window files: z_samples1 = [] hist_samples1 = [] z_samples2 = [] hist_samples2 = [] for zbin in range(self.nzbins): # sub-sample 1 window_file_path = os.path.join( self.data_directory, 'KV450_{:}/redshift'.format(self.sample1) + '/Nz_{0:}/Nz_{0:}_Mean/Nz_{0:}_z{1:}.asc'.format(self.nz_method, self.zbin_labels[zbin])) if os.path.exists(window_file_path): zptemp, hist_pz = np.loadtxt(window_file_path, usecols=[0, 1], unpack=True) shift_to_midpoint = np.diff(zptemp)[0] / 2. if zbin == 0: zpcheck = zptemp if np.sum((zptemp - zpcheck)**2) > 1e-6: raise Exception('The redshift values for the window files at different bins do not match.') # we add a zero as first element because we want to integrate down to z = 0! z_samples1 += [np.concatenate((np.zeros(1), zptemp + shift_to_midpoint))] hist_samples1 += [np.concatenate((np.zeros(1), hist_pz))] else: raise Exception("dn/dz file not found:\n %s"%window_file_path) # sub-sample 2 window_file_path = os.path.join( self.data_directory, 'KV450_{:}/redshift'.format(self.sample2) + '/Nz_{0:}/Nz_{0:}_Mean/Nz_{0:}_z{1:}.asc'.format(self.nz_method, self.zbin_labels[zbin])) if os.path.exists(window_file_path): zptemp, hist_pz = np.loadtxt(window_file_path, usecols=[0, 1], unpack=True) shift_to_midpoint = np.diff(zptemp)[0] / 2. if zbin == 0: zpcheck = zptemp if np.sum((zptemp - zpcheck)**2) > 1e-6: raise Exception('The redshift values for the window files at different bins do not match.') # we add a zero as first element because we want to integrate down to z = 0! z_samples2 += [np.concatenate((np.zeros(1), zptemp + shift_to_midpoint))] hist_samples2 += [np.concatenate((np.zeros(1), hist_pz))] else: raise Exception("dn/dz file not found:\n %s"%window_file_path) print('Loaded redshift distributions from (sample1): \n', os.path.join( self.data_directory, 'KV450_{:}/redshift'.format(self.sample1) + '/Nz_{0:}/Nz_{0:}_Mean/'.format(self.nz_method)), '\n') print('Loaded redshift distributions from (sample2): \n', os.path.join( self.data_directory, 'KV450_{:}/redshift'.format(self.sample2) + '/Nz_{0:}/Nz_{0:}_Mean/'.format(self.nz_method)), '\n') z_samples1 = np.asarray(z_samples1) hist_samples1 = np.asarray(hist_samples1) z_samples2 = np.asarray(z_samples2) hist_samples2 = np.asarray(hist_samples2) self.z_samples = [z_samples1, z_samples2] self.hist_samples = [hist_samples1, hist_samples2] # we assume two sub-samples have the same redshift-spacing self.nzmax = z_samples1.shape[1] # requires that z-spacing is always the same for all bins... self.z_p = z_samples1[0, :] print('Redshift integrations performed at resolution of redshift distribution histograms! 
\n') self.zmax = self.z_p.max() self.need_cosmo_arguments(data, {'z_max_pk': self.zmax}) # redshift offsets if 'D_z1_1' in data.mcmc_parameters: # naive duplicated sets self.z_offset = 'duplicated' print("Redshift offsets strategy: naive duplicated sets.") elif 'D_z1_m' in data.mcmc_parameters: # mean and shift self.z_offset = 'mean' print("Redshift offsets strategy: using mean and shift fitting.") elif 'D_z1' in data.mcmc_parameters: # common set self.z_offset = 'common' print("Redshift offsets strategy: using common set.") else: # no offsets self.z_offset = 'none' print("Redshift offsets strategy: no offsets") ################################################ # intrinsic alignment ################################################ if 'A_IA_1' in data.mcmc_parameters: self.A_IA = "duplicated" print("A_IA strategy: naive duplicated sets.") elif 'A_IA_m' in data.mcmc_parameters: # mean and shift self.A_IA = 'mean' print("A_IA strategy: using mean and shift fitting.") elif 'A_IA' in data.mcmc_parameters: self.A_IA = "common" print("A_IA is common.") else: self.A_IA = None print("A_IA is not used.") ################################################ # discrete theta values (to convert C_l to xi's) ################################################ if self.use_theory_binning: thetamin = np.min(self.theta_bin_min_val) * 0.8 thetamax = np.max(self.theta_bin_max_val) * 1.2 else: thetamin = np.min(self.theta_bins) * 0.8 thetamax = np.max(self.theta_bins) * 1.2 if self.integrate_Bessel_with == 'fftlog': try: import pycl2xi.fftlog as fftlog except: print('FFTLog was requested as integration method for the Bessel functions but is not installed. \n Download it from "https://github.com/tilmantroester/pycl2xi" and follow the installation instructions there (also requires the fftw3 library). \n Aborting run now... \n') exit() # this has to be declared a self, otherwise fftlog won't be available self.Cl2xi = fftlog.Cl2xi if self.integrate_Bessel_with == 'brute_force': # we redefine these settings so that lll for Bessel integration corresponds # to range that was used when comparing to CCL self.xmax = 100. self.dx_below_threshold = 0.02 self.dx_above_threshold = 0.07 self.dx_threshold = 0.2 self.dlntheta = 0.12 self.nthetatot = np.ceil(math.log(thetamax / thetamin) / self.dlntheta) + 1 self.nthetatot = np.int32(self.nthetatot) self.theta = np.zeros(self.nthetatot, 'float64') self.a2r = math.pi / (180. * 60.) # define an array of thetas for it in xrange(self.nthetatot): self.theta[it] = thetamin * math.exp(self.dlntheta * it) if self.integrate_Bessel_with in ['brute_force', 'cut_off']: ################################################################ # discrete l values used in the integral to convert C_l to xi's) ################################################################ # l = x / theta / self.a2r # x = l * theta * self.a2r # We start by considering the largest theta, theta[-1], and for that value we infer # a list of l's from the requirement that corresponding x values are spaced linearly with a given stepsize, until xmax. # Then we loop over smaller theta values, in decreasing order, and for each of them we complete the previous list of l's, # always requiring the same dx stepsize (so that dl does vary) up to xmax. # # We first apply this to a running value ll, in order to count the total number of ll's, called nl. # Then we create the array lll[nl] and we fill it with the same values.
# # we also compute on the fly the critical index il_max[it] such that ll[il_max[it]]*self.theta[it]*self.a2r # is the first value of x above xmax ll=1. il=0 while (ll*self.theta[-1]*self.a2r < self.dx_threshold): ll += self.dx_below_threshold/self.theta[-1]/self.a2r il += 1 for it in xrange(self.nthetatot): while (ll*self.theta[self.nthetatot-1-it]*self.a2r < self.xmax) and (ll+self.dx_above_threshold/self.theta[self.nthetatot-1-it]/self.a2r < self.lmax): ll += self.dx_above_threshold/self.theta[self.nthetatot-1-it]/self.a2r il += 1 self.nl = il+1 self.lll = np.zeros(self.nl, 'float64') self.il_max = np.zeros(self.nthetatot, 'int') il=0 self.lll[il]=1. while (self.lll[il]*self.theta[-1]*self.a2r < self.dx_threshold): il += 1 self.lll[il] = self.lll[il-1] + self.dx_below_threshold/self.theta[-1]/self.a2r for it in xrange(self.nthetatot): while (self.lll[il]*self.theta[self.nthetatot-1-it]*self.a2r < self.xmax) and (self.lll[il] + self.dx_above_threshold/self.theta[self.nthetatot-1-it]/self.a2r < self.lmax): il += 1 self.lll[il] = self.lll[il-1] + self.dx_above_threshold/self.theta[self.nthetatot-1-it]/self.a2r self.il_max[self.nthetatot-1-it] = il # finally we compute the array l*dl that will be used in the trapezoidal integration # (l is a factor in the integrand [l * C_l * Bessel], and dl is like a weight) self.ldl = np.zeros(self.nl, 'float64') self.ldl[0]=self.lll[0]*0.5*(self.lll[1]-self.lll[0]) for il in xrange(1,self.nl-1): self.ldl[il]=self.lll[il]*0.5*(self.lll[il+1]-self.lll[il-1]) self.ldl[-1]=self.lll[-1]*0.5*(self.lll[-1]-self.lll[-2]) else: # this is sufficient (FFTLog only uses 5k points internally anyways...) ell_lin = np.arange(1., 501., 1) ell_log = np.logspace(np.log10(501.), np.log10(self.lmax), 5000 - len(ell_lin)) self.lll = np.concatenate((ell_lin, ell_log)) # linspace --> overkill and too slow! 
#self.lll = np.arange(1., self.lmax + 1., 1) self.nl = self.lll.size # here we set up arrays and some integrations necessary for the theory binning: if self.use_theory_binning: if self.read_weight_func_for_binning: fname = os.path.join(self.data_directory, self.theory_weight_func_file) thetas, weights = np.loadtxt(fname, unpack=True) self.theory_weight_func = itp.splrep(thetas, weights) else: thetas = np.linspace(self.theta_bin_min_val, self.theta_bin_max_val, self.ntheta * int(self.theta_nodes_theory)) weights = self.a2r * thetas * self.theory_binning_const self.theory_weight_func = itp.splrep(thetas, weights) # first get the theta-bin borders based on ntheta and absolute min and absolute max values a = np.linspace(np.log10(self.theta_bin_min_val), np.log10(self.theta_bin_max_val), self.ntheta + 1) theta_bins = 10.**a self.theta_bin_min = theta_bins[:-1] self.theta_bin_max = theta_bins[1:] self.int_weight_func = np.zeros(self.ntheta) self.thetas_for_theory_binning = np.zeros((self.ntheta, int(self.theta_nodes_theory))) for idx_theta in xrange(self.ntheta): theta = np.linspace(self.theta_bin_min[idx_theta], self.theta_bin_max[idx_theta], int(self.theta_nodes_theory)) dtheta = (theta[1:] - theta[:-1]) * self.a2r weight_func_integrand = itp.splev(theta, self.theory_weight_func) self.int_weight_func[idx_theta] = np.sum(0.5 * (weight_func_integrand[1:] + weight_func_integrand[:-1]) * dtheta) # for convenience: self.thetas_for_theory_binning[idx_theta, :] = theta ################################################ # cosmo calculation ################################################ # Importing the python-wrapped CLASS from the correct folder, defined in # the .conf file, or overwritten at this point by the log.param. # If the cosmological code is CLASS, do the following to import all # relevant quantities try: classy_path = '' for elem in os.listdir(os.path.join( data.path['cosmo'], "python", "build")): if elem.find("lib.") != -1: classy_path = os.path.join( data.path['cosmo'], "python", "build", elem) break except OSError: raise io_mp.ConfigurationError( "You probably did not compile the python wrapper of CLASS. " + "Please go to /path/to/class/python/ and do\n" + "..]$ python setup.py build") # Inserting the previously found path into the list of folders to # search for python modules. sys.path.insert(1, classy_path) try: from classy import Class except ImportError: raise io_mp.MissingLibraryError( "You must have compiled the classy.pyx file. Please go to " + "/path/to/class/python and run the command\n " + "python setup.py build") cosmo = Class() print('Intitial cosmological parameters passed to CLASS code:') print(data.cosmo_arguments) # Prepare the cosmological module with the input parameters cosmo.set(data.cosmo_arguments) cosmo.compute(["lensing"]) # Omega_m contains all species! 
self.Omega_m = cosmo.Omega_m() self.small_h = cosmo.h() print('Omega_m =', self.Omega_m) print('h =', self.small_h) # One wants to obtain here the relation between z and r, this is done # by asking the cosmological module with the function z_of_r self.r, self.dzdr = cosmo.z_of_r(self.z_p) # linear growth rate self.rho_crit = self.get_critical_density() # derive the linear growth factor D(z) self.linear_growth_rate = np.zeros_like(self.z_p) #print(self.redshifts) for index_z, z in enumerate(self.z_p): # for CLASS ver >= 2.6: self.linear_growth_rate[index_z] = cosmo.scale_independent_growth_factor(z) # normalize to unity at z=0: # for CLASS ver >= 2.6: self.linear_growth_rate /= cosmo.scale_independent_growth_factor(0.) # Get power spectrum P(k=l/r,z(r)) from cosmological module #self.pk_dm = np.zeros_like(self.pk) self.pk = np.zeros((self.nlmax, self.nzmax), 'float64') self.pk_lin = np.zeros((self.nlmax, self.nzmax), 'float64') kmax_in_inv_Mpc = self.k_max_h_by_Mpc * self.small_h for index_l in xrange(self.nlmax): for index_z in xrange(1, self.nzmax): k_in_inv_Mpc = (self.l[index_l] + 0.5) / self.r[index_z] if (k_in_inv_Mpc > kmax_in_inv_Mpc): pk_dm = 0. pk_lin_dm = 0. else: pk_dm = cosmo.pk(k_in_inv_Mpc, self.z_p[index_z]) pk_lin_dm = cosmo.pk_lin(k_in_inv_Mpc, self.z_p[index_z]) self.pk[index_l, index_z] = pk_dm self.pk_lin[index_l, index_z] = pk_lin_dm return def __load_public_data_vector(self, id_sample): """ Read data vector and bring it into the desired format """ if id_sample == '1': data_sample = self.sample1 elif id_sample == '2': data_sample = self.sample2 else: raise Exception("Unexpected data_sample id (should be '1' or '2') ! ") # plus one for theta-column data_xip = np.zeros((self.ntheta, self.nzcorrs + 1)) data_xim = np.zeros((self.ntheta, self.nzcorrs + 1)) idx_corr = 0 for zbin1 in xrange(self.nzbins): for zbin2 in xrange(zbin1, self.nzbins): fname = os.path.join(self.data_directory, 'KV450_{:}/data_vector/xi_for_cosmo_tomo_{:}_{:}_withK_{:}.dat'.format(data_sample, zbin1+1, zbin2+1, data_sample)) theta, xip, xim = np.loadtxt(fname, unpack=True) # this assumes theta is the same for every tomographic bin and # for both xi_p and xi_m! if idx_corr == 0: data_xip[:, 0] = theta data_xim[:, 0] = theta data_xip[:, idx_corr + 1] = xip data_xim[:, idx_corr + 1] = xim idx_corr += 1 data = np.concatenate((data_xip, data_xim)) print('Loaded data vectors from: \n', os.path.join(self.data_directory, 'KV450_{:}/'.format(data_sample)), '\n') return data def __load_public_cov_mat(self, id_cov): """ Read in the full covariance matrix and to bring it into format of self.xi_obs. """ if id_cov == '11': cov_file = self.cov11_file elif id_cov == '12': cov_file = self.cov12_file elif id_cov == '22': cov_file = self.cov22_file else: raise Exception("Unexpected covariance id (should be '11', '12' or '22') ! ") fname = os.path.join(self.data_directory, 'cov_for_joint/{:}'.format(cov_file)) if os.path.exists(fname): matrix = np.loadtxt(fname) print('Loaded covariance matrix (incl. 
shear calibration uncertainty) in a format usable with this likelihood from: \n', fname, '\n') else: raise Exception("cov_mat file not found:\n %s"%fname) return matrix def __get_mask(self, cut_values): mask = np.zeros(2 * self.nzcorrs * self.ntheta) iz = 0 for izl in xrange(self.nzbins): for izh in xrange(izl, self.nzbins): # this counts the bin combinations # iz=1 =>(1,1), iz=2 =>(1,2) etc iz = iz + 1 for i in xrange(self.ntheta): j = (iz-1)*2*self.ntheta #xi_plus_cut = max(cut_values[izl, 0], cut_values[izh, 0]) xi_plus_cut_low = max(cut_values[izl, 0], cut_values[izh, 0]) xi_plus_cut_high = max(cut_values[izl, 1], cut_values[izh, 1]) #xi_minus_cut = max(cut_values[izl, 1], cut_values[izh, 1]) xi_minus_cut_low = max(cut_values[izl, 2], cut_values[izh, 2]) xi_minus_cut_high = max(cut_values[izl, 3], cut_values[izh, 3]) if ((self.theta_bins[i] < xi_plus_cut_high) and (self.theta_bins[i]>xi_plus_cut_low)): mask[j+i] = 1 if ((self.theta_bins[i] < xi_minus_cut_high) and (self.theta_bins[i]>xi_minus_cut_low)): mask[self.ntheta + j+i] = 1 return mask def __get_xi_obs(self, temp): """ This function takes xi_pm as read in from the data file and constructs the xi_pm vector in its observed ordering: xi_obs = {xi_p(theta1, z1xz1)... xi_p(thetaK, z1xz1), xi_m(theta1, z1xz1)... xi_m(thetaK, z1xz1);... xi_p(theta1, zNxzN)... xi_p(thetaK, zNxzN), xi_m(theta1, zNxzN)... xi_m(thetaK, zNxN)} """ xi_obs = np.zeros(self.ntheta * self.nzcorrs * 2) # create the data-vector: k = 0 for j in xrange(self.nzcorrs): for i in xrange(2 * self.ntheta): xi_obs[k] = temp[i, j] k += 1 return xi_obs def __get_xi_p_and_xi_m(self, vec_old): """ This function takes a xi_pm vector in the observed ordering (as it comes out of the __get_xi_obs-function for example) and splits it again in its xi_p and xi_m parts. """ ''' tmp = np.zeros((2 * self.ntheta, self.nzbins, self.nzbins), 'float64') vec1_new = np.zeros((self.ntheta, self.nzbins, self.nzbins), 'float64') vec2_new = np.zeros((self.ntheta, self.nzbins, self.nzbins), 'float64') index_corr = 0 for index_zbin1 in xrange(self.nzbins): for index_zbin2 in xrange(index_zbin1, self.nzbins): #for index_theta in xrange(ntheta): index_low = 2 * self.ntheta * index_corr index_high = 2 * self.ntheta * index_corr + 2 * self.ntheta #print(index_low, index_high) tmp[:, index_zbin1, index_zbin2] = vec_old[index_low:index_high] vec1_new[:, index_zbin1, index_zbin2] = tmp[:self.ntheta, index_zbin1, index_zbin2] vec2_new[:, index_zbin1, index_zbin2] = tmp[self.ntheta:, index_zbin1, index_zbin2] index_corr += 1 ''' tmp = np.zeros((2 * self.ntheta, self.nzcorrs), 'float64') vec1_new = np.zeros((self.ntheta, self.nzcorrs), 'float64') vec2_new = np.zeros((self.ntheta, self.nzcorrs), 'float64') for index_corr in xrange(self.nzcorrs): index_low = 2 * self.ntheta * index_corr index_high = 2 * self.ntheta * index_corr + 2 * self.ntheta #print(index_low, index_high) tmp[:, index_corr] = vec_old[index_low:index_high] vec1_new[:, index_corr] = tmp[:self.ntheta, index_corr] vec2_new[:, index_corr] = tmp[self.ntheta:, index_corr] return vec1_new, vec2_new def baryon_feedback_bias_sqr(self, k, z, A_bary=1.): """ Fitting formula for baryon feedback after equation 10 and Table 2 from <NAME> al. 2014 (arXiv.1407.4301) """ # k is expected in h/Mpc and is divided in log by this unit... x = np.log10(k) a = 1. / (1. 
+ z) a_sqr = a * a constant = {'AGN': {'A2': -0.11900, 'B2': 0.1300, 'C2': 0.6000, 'D2': 0.002110, 'E2': -2.0600, 'A1': 0.30800, 'B1': -0.6600, 'C1': -0.7600, 'D1': -0.002950, 'E1': 1.8400, 'A0': 0.15000, 'B0': 1.2200, 'C0': 1.3800, 'D0': 0.001300, 'E0': 3.5700}, 'REF': {'A2': -0.05880, 'B2': -0.2510, 'C2': -0.9340, 'D2': -0.004540, 'E2': 0.8580, 'A1': 0.07280, 'B1': 0.0381, 'C1': 1.0600, 'D1': 0.006520, 'E1': -1.7900, 'A0': 0.00972, 'B0': 1.1200, 'C0': 0.7500, 'D0': -0.000196, 'E0': 4.5400}, 'DBLIM': {'A2': -0.29500, 'B2': -0.9890, 'C2': -0.0143, 'D2': 0.001990, 'E2': -0.8250, 'A1': 0.49000, 'B1': 0.6420, 'C1': -0.0594, 'D1': -0.002350, 'E1': -0.0611, 'A0': -0.01660, 'B0': 1.0500, 'C0': 1.3000, 'D0': 0.001200, 'E0': 4.4800}} A_z = constant[self.baryon_model]['A2']*a_sqr+constant[self.baryon_model]['A1']*a+constant[self.baryon_model]['A0'] B_z = constant[self.baryon_model]['B2']*a_sqr+constant[self.baryon_model]['B1']*a+constant[self.baryon_model]['B0'] C_z = constant[self.baryon_model]['C2']*a_sqr+constant[self.baryon_model]['C1']*a+constant[self.baryon_model]['C0'] D_z = constant[self.baryon_model]['D2']*a_sqr+constant[self.baryon_model]['D1']*a+constant[self.baryon_model]['D0'] E_z = constant[self.baryon_model]['E2']*a_sqr+constant[self.baryon_model]['E1']*a+constant[self.baryon_model]['E0'] # only for debugging; tested and works! #print('AGN: A2=-0.11900, B2= 0.1300, C2= 0.6000, D2= 0.002110, E2=-2.0600') #print(self.baryon_model+': A2={:.5f}, B2={:.5f}, C2={:.5f}, D2={:.5f}, E2={:.5f}'.format(constant[self.baryon_model]['A2'], constant[self.baryon_model]['B2'], constant[self.baryon_model]['C2'],constant[self.baryon_model]['D2'], constant[self.baryon_model]['E2'])) # original formula: #bias_sqr = 1.-A_z*np.exp((B_z-C_z)**3)+D_z*x*np.exp(E_z*x) # original formula with a free amplitude A_bary: bias_sqr = 1. - A_bary * (A_z * np.exp((B_z * x - C_z)**3) - D_z * x * np.exp(E_z * x)) return bias_sqr def get_IA_factor(self, z, linear_growth_rate, amplitude, exponent): const = 5e-14 / self.small_h**2 # Mpc^3 / M_sol # arbitrary convention z0 = 0.3 #print(utils.growth_factor(z, self.Omega_m)) #print(self.rho_crit) factor = -1. * amplitude * const * self.rho_crit * self.Omega_m / linear_growth_rate * ((1. + z) / (1. + z0))**exponent return factor def get_critical_density(self): """ The critical density of the Universe at redshift 0. Returns ------- rho_crit in solar masses per cubic Megaparsec. """ # yay, constants... Mpc_cm = 3.08568025e24 # cm M_sun_g = 1.98892e33 # g G_const_Mpc_Msun_s = M_sun_g * (6.673e-8) / Mpc_cm**3. H100_s = 100. / (Mpc_cm * 1.0e-5) # s^-1 rho_crit_0 = 3. * (self.small_h * H100_s)**2. / (8. * np.pi * G_const_Mpc_Msun_s) return rho_crit_0 def loglkl(self, cosmo, data): # get all cosmology dependent quantities here: xi_theo_1 = self.cosmo_calculations(data, np.size(self.xi_obs_1), sample_index = 1) xi_theo_2 = self.cosmo_calculations(data, np.size(self.xi_obs_2), sample_index = 2) # final chi2 vec = np.concatenate((xi_theo_1, xi_theo_2))[self.mask_indices] - np.concatenate((self.xi_obs_1, self.xi_obs_2))[self.mask_indices] if np.isinf(vec).any() or np.isnan(vec).any(): chi2 = 2e12 else: # don't invert that matrix... 
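            # (chi2 = vec^T C^-1 vec is computed by solving the triangular system
            #  L y = vec, where C = L L^T is the precomputed Cholesky factorization,
            #  so that chi2 = y^T y; this avoids forming C^-1 explicitly)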
# use the Cholesky decomposition instead: yt = solve_triangular(self.cholesky_transform, vec, lower=True) chi2 = yt.dot(yt) # enforce Gaussian priors on NUISANCE parameters if requested: if self.use_gaussian_prior_for_nuisance: for idx_nuisance, nuisance_name in enumerate(self.gaussian_prior_name): scale = data.mcmc_parameters[nuisance_name]['scale'] chi2 += (data.mcmc_parameters[nuisance_name]['current'] * scale - self.gaussian_prior_center[idx_nuisance])**2 / self.gaussian_prior_sigma[idx_nuisance]**2 return -chi2/2. def cosmo_calculations(self, data, size_xi_obs, sample_index): # needed for IA modelling: if self.A_IA == 'duplicated': param_name1 = 'A_IA_{:}'.format(sample_index) amp_IA = data.mcmc_parameters[param_name1]['current'] * data.mcmc_parameters[param_name1]['scale'] intrinsic_alignment = True elif self.A_IA == 'mean': param_name1 = 'A_IA_m' param_name2 = 'A_IA_s' amp_IA_mean = data.mcmc_parameters[param_name1]['current'] * data.mcmc_parameters[param_name1]['scale'] amp_IA_shift = data.mcmc_parameters[param_name2]['current'] * data.mcmc_parameters[param_name2]['scale'] if sample_index == 1: amp_IA = amp_IA_mean + amp_IA_shift elif sample_index == 2: amp_IA = amp_IA_mean - amp_IA_shift else: raise Exception("Unexpected sample_index in amp_IA !") intrinsic_alignment = True elif self.A_IA == 'common': param_name1 = 'A_IA' amp_IA = data.mcmc_parameters[param_name1]['current'] * data.mcmc_parameters[param_name1]['scale'] intrinsic_alignment = True else: intrinsic_alignment = False # exp_IA is not used exp_IA = 0. # Compute now the selection function p(r) = p(z) dz/dr normalized # to one. The np.newaxis helps to broadcast the one-dimensional array # dzdr to the proper shape. Note that p_norm is also broadcasted as # an array of the same shape as p_z # for KiDS-450 constant biases in photo-z are not sufficient: pz = np.zeros((self.nzmax, self.nzbins), 'float64') pz_norm = np.zeros(self.nzbins, 'float64') for zbin in xrange(self.nzbins): # redshift offset if self.z_offset == 'duplicated': param_name = 'D_z{:}_{:}'.format(zbin + 1, sample_index) z_mod = self.z_p + data.mcmc_parameters[param_name]['current'] * data.mcmc_parameters[param_name]['scale'] elif self.z_offset == 'mean': param_name1 = 'D_z{:}_m'.format(zbin + 1) param_name2 = 'D_z{:}_s'.format(zbin + 1) Dz_mean = data.mcmc_parameters[param_name1]['current'] * data.mcmc_parameters[param_name1]['scale'] Dz_shift = data.mcmc_parameters[param_name2]['current'] * data.mcmc_parameters[param_name2]['scale'] if sample_index == 1: z_mod = self.z_p + (Dz_mean + Dz_shift) elif sample_index == 2: z_mod = self.z_p + (Dz_mean - Dz_shift) else: raise Exception("Unexpected sample_index in redshift offsets!") elif self.z_offset == 'common': param_name = 'D_z{:}'.format(zbin + 1) z_mod = self.z_p + data.mcmc_parameters[param_name]['current'] * data.mcmc_parameters[param_name]['scale'] else: z_mod = self.z_p # the artificial zero-point is not included for spline spline_pz = itp.interp1d(self.z_samples[sample_index-1][zbin, 1:], self.hist_samples[sample_index-1][zbin, 1:], kind=self.type_redshift_interp) mask_min = z_mod >= self.z_samples[sample_index-1][zbin, 1:].min() mask_max = z_mod <= self.z_samples[sample_index-1][zbin, 1:].max() mask_z = mask_min & mask_max # points outside the z-range of the histograms are set to 0! 
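            # (interp1d raises outside its support by default, hence the explicit mask;
            #  masked-out entries of pz keep their initialised value of 0)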
pz[mask_z, zbin] = spline_pz(z_mod[mask_z]) # Normalize selection functions dz = self.z_p[1:] - self.z_p[:-1] pz_norm[zbin] = np.sum(0.5 * (pz[1:, zbin] + pz[:-1, zbin]) * dz) pr = pz * (self.dzdr[:, np.newaxis] / pz_norm) # nuisance parameter for m-correction (one value for all bins): # implemented tomography-friendly so it's very easy to implement a dm per z-bin from here! param_name = 'dm_{:}'.format(sample_index) if param_name in data.mcmc_parameters: dm_per_zbin = np.ones((self.ntheta, self.nzbins)) dm_per_zbin *= data.mcmc_parameters[param_name]['current'] * data.mcmc_parameters[param_name]['scale'] else: # so that nothing will change if we don't marginalize over dm! dm_per_zbin = np.zeros((self.ntheta, self.nzbins)) # nuisance parameters for constant c-correction: dc1_per_zbin = np.zeros((self.ntheta, self.nzbins)) dc2_per_zbin = np.zeros((self.ntheta, self.nzbins)) for zbin in xrange(self.nzbins): #param_name = 'dc_z{:}_{:}'.format(zbin + 1, sample_index) # param_name = 'dc_{:}'.format(sample_index) param_name = 'dc' if param_name in data.mcmc_parameters: dc1_per_zbin[:, zbin] = np.ones(self.ntheta) * data.mcmc_parameters[param_name]['current'] * data.mcmc_parameters[param_name]['scale'] # add here dc2 if xi- turns out to be affected! #dc2_per_zbin[zbin] = dc2_per_zbin[zbin] # correlate dc1/2_per_zbin in tomographic order of xi1/2: dc1_sqr = np.zeros((self.ntheta, self.nzcorrs)) dc2_sqr = np.zeros((self.ntheta, self.nzcorrs)) # correlate dm_per_zbin in tomographic order of xi1/2: dm_plus_one_sqr = np.zeros((self.ntheta, self.nzcorrs)) index_corr = 0 for zbin1 in xrange(self.nzbins): for zbin2 in xrange(zbin1, self.nzbins): # c-correction: dc1_sqr[:, index_corr] = dc1_per_zbin[:, zbin1] * dc1_per_zbin[:, zbin2] dc2_sqr[:, index_corr] = dc2_per_zbin[:, zbin1] * dc2_per_zbin[:, zbin2] # m-correction: dm_plus_one_sqr[:, index_corr] = (1. + dm_per_zbin[:, zbin1]) * (1. + dm_per_zbin[:, zbin2]) index_corr += 1 # get c-correction into form of xi_obs temp = np.concatenate((dc1_sqr, dc2_sqr)) dc_sqr = self.__get_xi_obs(temp) # get m-correction into form of xi_obs temp = np.concatenate((dm_plus_one_sqr, dm_plus_one_sqr)) dm_plus_one_sqr_obs = self.__get_xi_obs(temp) # Below we construct a theta-dependent c-correction function from # measured data (for one z-bin) and scale it with an amplitude per z-bin # which is to be fitted # this is all independent of the constant c-correction calculated above xip_c = np.zeros((self.ntheta, self.nzcorrs)) xim_c = np.zeros((self.ntheta, self.nzcorrs)) if self.use_cterm_function: amps_cfunc = np.ones(self.nzbins) for zbin in xrange(self.nzbins): # param_name = 'Ac_{:}'.format(sample_index) param_name = 'Ac' if param_name in data.mcmc_parameters: amps_cfunc[zbin] = data.mcmc_parameters[param_name]['current'] * data.mcmc_parameters[param_name]['scale'] index_corr = 0 for zbin1 in xrange(self.nzbins): for zbin2 in xrange(zbin1, self.nzbins): #sign = np.sign(amps_cfunc[zbin1]) * np.sign(amps_cfunc[zbin2]) #xip_c[:, index_corr] = sign * np.sqrt(np.abs(amps_cfunc[zbin1] * amps_cfunc[zbin2])) * self.xip_c_per_zbin xip_c[:, index_corr] = amps_cfunc[zbin1] * amps_cfunc[zbin2] * self.xip_c_per_zbin # TODO: we leave xim_c set to 0 for now! 
#xim_c[:, index_corr] = amps_cfunc[zbin1] * amps_cfunc[zbin2] * self.xim_c_per_zbin index_corr += 1 # get it into order of xi_obs # contains only zeros if function is not requested # TODO xim-component contains only zeros temp = np.concatenate((xip_c, xim_c)) xipm_c = self.__get_xi_obs(temp) # Compute function g_i(r), that depends on r and the bin # g_i(r) = 2r(1+z(r)) int_r^+\infty drs p_r(rs) (rs-r)/rs g = np.zeros((self.nzmax, self.nzbins), 'float64') for Bin in xrange(self.nzbins): # shift only necessary if z[0] = 0 for nr in xrange(1, self.nzmax - 1): #for nr in xrange(self.nzmax - 1): fun = pr[nr:, Bin] * (self.r[nr:] - self.r[nr]) / self.r[nr:] g[nr, Bin] = np.sum(0.5*(fun[1:] + fun[:-1]) * (self.r[nr+1:] - self.r[nr:-1])) g[nr, Bin] *= 2. * self.r[nr] * (1. + self.z_p[nr]) Cl_GG_integrand = np.zeros((self.nzmax, self.nzcorrs), 'float64') Cl_GG = np.zeros((self.nlmax, self.nzcorrs), 'float64') if intrinsic_alignment: Cl_II_integrand = np.zeros_like(Cl_GG_integrand) Cl_II = np.zeros_like(Cl_GG) Cl_GI_integrand = np.zeros_like(Cl_GG_integrand) Cl_GI = np.zeros_like(Cl_GG) dr = self.r[1:] - self.r[:-1] # Start loop over l for computation of C_l^shear # Start loop over l for computation of E_l for il in xrange(self.nlmax): # find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r)) for Bin1 in xrange(self.nzbins): for Bin2 in xrange(Bin1, self.nzbins): Cl_GG_integrand[1:, self.one_dim_index(Bin1,Bin2)] = g[1:, Bin1] * g[1:, Bin2] / self.r[1:]**2 * self.pk[il, 1:] #print(self.Cl_integrand) if intrinsic_alignment: factor_IA = self.get_IA_factor(self.z_p, self.linear_growth_rate, amp_IA, exp_IA) #/ self.dzdr[1:] #print(F_of_x) #print(self.eta_r[1:, zbin1].shape) if self.use_linear_pk_for_IA: # this term (II) uses the linear matter power spectrum P_lin(k, z) Cl_II_integrand[1:, self.one_dim_index(Bin1,Bin2)] = pr[1:, Bin1] * pr[1:, Bin2] * factor_IA[1:]**2 / self.r[1:]**2 * self.pk_lin[il, 1:] # this term (GI) uses sqrt(P_lin(k, z) * P_nl(k, z)) Cl_GI_integrand[1:, self.one_dim_index(Bin1,Bin2)] = (g[1:, Bin1] * pr[1:, Bin2] + g[1:, Bin2] * pr[1:, Bin1]) * factor_IA[1:] / self.r[1:]**2 * np.sqrt(self.pk_lin[il, 1:] * self.pk[il, 1:]) else: # both II and GI terms use the non-linear matter power spectrum P_nl(k, z) Cl_II_integrand[1:, self.one_dim_index(Bin1,Bin2)] = pr[1:, Bin1] * pr[1:, Bin2] * factor_IA[1:]**2 / self.r[1:]**2 * self.pk[il, 1:] Cl_GI_integrand[1:, self.one_dim_index(Bin1,Bin2)] = (g[1:, Bin1] * pr[1:, Bin2] + g[1:, Bin2] * pr[1:, Bin1]) * factor_IA[1:] / self.r[1:]**2 * self.pk[il, 1:] # Integrate over r to get C_l^shear_ij = P_ij(l) # C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r) # g_j(r) /r**2) P(k=l/r,z(r)) dr # It is then multiplied by 9/16*Omega_m**2 # and then by (h/2997.9)**4 to be dimensionless # (since P(k)*dr is in units of Mpc**4) for Bin in xrange(self.nzcorrs): Cl_GG[il, Bin] = np.sum(0.5*(Cl_GG_integrand[1:, Bin] + Cl_GG_integrand[:-1, Bin]) * dr) Cl_GG[il, Bin] *= 9. / 16. * self.Omega_m**2 Cl_GG[il, Bin] *= (self.small_h / 2997.9)**4 if intrinsic_alignment: Cl_II[il, Bin] = np.sum(0.5 * (Cl_II_integrand[1:, Bin] + Cl_II_integrand[:-1, Bin]) * dr) Cl_GI[il, Bin] =
np.sum(0.5 * (Cl_GI_integrand[1:, Bin] + Cl_GI_integrand[:-1, Bin]) * dr)
numpy.sum
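The brute-force branch above evaluates xi_pm(theta) = 1/(2*pi) * int dl l C(l) J_{0/4}(l*theta) by trapezoidal summation with the precomputed l*dl weights. A minimal, self-contained sketch of that integral follows; the function name and the toy power spectrum are illustrative only, not part of the likelihood.

import numpy as np
from scipy.special import jv  # Bessel function of the first kind J_n


def cl_to_xi_brute_force(lll, cl, theta_rad, bessel_order):
    """Trapezoidal version of xi(theta) = 1/(2*pi) * int dl l C(l) J_n(l*theta)."""
    # trapezoid weights l*dl, built exactly as self.ldl in the likelihood above
    ldl = np.empty_like(lll)
    ldl[0] = lll[0] * 0.5 * (lll[1] - lll[0])
    ldl[1:-1] = lll[1:-1] * 0.5 * (lll[2:] - lll[:-2])
    ldl[-1] = lll[-1] * 0.5 * (lll[-1] - lll[-2])
    return np.sum(ldl * cl * jv(bessel_order, lll * theta_rad)) / (2. * np.pi)


# toy example: a smooth power-law spectrum evaluated at theta = 1 arcmin
lll = np.arange(1., 5000.)
cl = 1e-8 * (lll / 100.)**-1.5
a2r = np.pi / (180. * 60.)
xi_p = cl_to_xi_brute_force(lll, cl, 1.0 * a2r, 0)   # J_0 kernel -> xi_+
xi_m = cl_to_xi_brute_force(lll, cl, 1.0 * a2r, 4)   # J_4 kernel -> xi_-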
r""" Kernel module, implements a few standard kernels for use with the ``GaussianProcess`` class. Options include the Squared Exponential kernel and Matern 5/2 kernel, both with either a single correlation length (``UniformSqExp``, ``UniformMat52``) or correlation lengths for each input dimension (``SquaredExponential``, ``Matern52``). The product form of the Matern 5/2 kernel (``ProductMat52``) is also available. """ import numpy as np class KernelBase(object): "Base Kernel" def get_n_params(self, inputs): """ Determine number of correlation length parameters based on inputs Determines the number of parameters required for a given set of inputs. Returns the number of parameters as an integer. :param inputs: Set of inputs for which the number of correlation length parameters is desired. :type inputs: ndarray :returns: Number of correlation length parameters :rtype: int """ inputs = np.array(inputs) assert inputs.ndim == 2, "Inputs must be a 2D array" return inputs.shape[1] def _check_inputs(self, x1, x2, params): r""" Common function for checking dimensions of inputs (default version) This function checks the inputs to any kernel evaluation for consistency and ensures that all input arrays have the correct dimensionality. It returns the reformatted arrays, the number of inputs, and the number of hyperparameters. If the method determines that the array dimensions are not all consistent with one another, it will raise an ``AssertionError``. This method is called internally whenever the kernel is evaluated. :param x1: First parameter array. Should be a 1-D or 2-D array (1-D is acceptable if either there is only a single point, or each point has only a single parameter). If there is more than one parameter, the last dimension must match the last dimension of ``x2`` and be one less than the length of ``params``. :type x1: array-like :param x2: Second parameter array. The same restrictions apply that hold for ``x1`` described above. :type x2: array-like :param params: Hyperparameter array. Must have a length that is one more than the last dimension of ``x1`` and ``x2``, hence minimum length is 2. :type params: array-like :returns: A tuple containing the following: reformatted ``x1``, ``n1``, reformatted ``x2``, ``n2``, ``params``, and ``D``. ``x1`` will be an array with dimensions ``(n1, D - 1)``, ``x2`` will be an array with dimensions ``(n2, D - 1)``, and ``params`` will be an array with dimensions ``(D,)``. ``n1``, ``n2``, and ``D`` will be integers. """ params = np.array(params) assert params.ndim == 1, "parameters must be a vector" D = len(params) assert D >= 1, "minimum number of parameters in a covariance kernel is 1" x1 = np.array(x1) assert x1.ndim == 1 or x1.ndim == 2, "bad number of dimensions in input x1" if x1.ndim == 2: assert x1.shape[1] == D, "bad shape for x1" else: if D == 1: x1 = np.reshape(x1, (len(x1), 1)) else: x1 = np.reshape(x1, (1, D)) n1 = x1.shape[0] x2 = np.array(x2) assert x2.ndim == 1 or x2.ndim == 2, "bad number of dimensions in input x2" if x2.ndim == 2: assert x2.shape[1] == D, "bad shape for x2" else: if D == 1: x2 = np.reshape(x2, (len(x2), 1)) else: x2 = np.reshape(x2, (1, D)) n2 = x2.shape[0] return x1, n1, x2, n2, params, D def kernel_f(self, x1, x2, params): r""" Compute kernel values for a set of inputs Returns the value of the kernel for two sets of input points and a choice of hyperparameters. 
This function should not need to be modified for different choices of the kernel function or distance metric, as after checking the inputs it simply calls the routine to compute the distance metric and then evaluates the kernel function for those distances. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding all kernel values between points in arrays ``x1`` and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) return self.calc_K(self.calc_r2(x1, x2, params)) def kernel_deriv(self, x1, x2, params): r""" Compute kernel gradient for a set of inputs Returns the value of the kernel gradient for two sets of input points and a choice of hyperparameters. This function should not need to be modified for different choices of the kernel function or distance metric, as after checking the inputs it simply calls the routine to compute the distance metric, kernel function, and the appropriate derivative functions of the distance and kernel functions. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the gradient of the kernel function between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first axis indicates the different derivative components (i.e. the derivative with respect to the first parameter is [0,:,:], etc.) :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) dKdr2 = self.calc_dKdr2(self.calc_r2(x1, x2, params)) dr2dtheta = self.calc_dr2dtheta(x1, x2, params) dKdtheta = dKdr2*dr2dtheta return dKdtheta def kernel_hessian(self, x1, x2, params): r""" Calculate the Hessian of the kernel evaluated for all pairs of points with respect to the hyperparameters Returns the value of the kernel Hessian for two sets of input points and a choice of hyperparameters. 
This function should not need to be modified for different choices of the kernel function or distance metric, as after checking the inputs it simply calls the routine to compute the distance metric, kernel function, and the appropriate derivative functions of the distance and kernel functions. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the Hessian of the pair-wise distances between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first two axes indicates the different derivative components (i.e. the second derivative with respect to the first parameter is [0,0,:,:], the mixed partial with respect to the first and second parameters is [0,1,:,:] or [1,0,:,:], etc.) :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) r2_matrix = self.calc_r2(x1, x2, params) dKdr2 = self.calc_dKdr2(r2_matrix) d2Kdr22 = self.calc_d2Kdr22(r2_matrix) dr2dtheta = self.calc_dr2dtheta(x1, x2, params) d2r2dtheta2 = self.calc_d2r2dtheta2(x1, x2, params) d2Kdtheta2 = (d2Kdr22 * dr2dtheta[:,np.newaxis,:,:] * dr2dtheta[np.newaxis,:,:,:] + dKdr2 * d2r2dtheta2) return d2Kdtheta2 class UniformKernel(KernelBase): r""" Kernel with a single correlation length """ def get_n_params(self, inputs): """ Determine number of correlation length parameters based on inputs Determines the number of parameters required for a given set of inputs. Returns the number of parameters as an integer. :param inputs: Set of inputs for which the number of correlation length parameters is desired. :type inputs: ndarray :returns: Number of correlation length parameters :rtype: int """ return 1 def _check_inputs(self, x1, x2, params): r""" Common function for checking dimensions of inputs This function checks the inputs to any kernel evaluation for consistency and ensures that all input arrays have the correct dimensionality. It returns the reformatted arrays, the number of inputs, and the number of hyperparameters. If the method determines that the array dimensions are not all consistent with one another, it will raise an ``AssertionError``. This method is called internally whenever the kernel is evaluated. :param x1: First parameter array. Should be a 1-D or 2-D array (1-D is acceptable if each point has only a single input parameter). :type x1: array-like :param x2: Second parameter array. The same restrictions apply that hold for ``x1`` described above. :type x2: array-like :param params: Hyperparameter array. Must have a length that is one more than the last dimension of ``x1`` and ``x2``, hence minimum length is 2. :type params: array-like :returns: A tuple containing the following: reformatted ``x1``, ``n1``, reformatted ``x2``, ``n2``, ``params``, and ``D``. 
``n1``, ``n2``, and ``D`` will be integers. """ params = np.array(params) assert params.ndim == 1, "parameters must be a vector" D = len(params) assert D == 1, "Uniform kernels only support a single correlation length" x1 = np.array(x1) assert x1.ndim == 1 or x1.ndim == 2, "bad number of dimensions in input x1" if not x1.ndim == 2: x1 = np.reshape(x1, (-1, 1)) n1 = x1.shape[0] x2 = np.array(x2) assert x2.ndim == 1 or x2.ndim == 2, "bad number of dimensions in input x2" if not x2.ndim == 2: x2 = np.reshape(x2, (-1, 1)) n2 = x2.shape[0] assert x1.shape[1] == x2.shape[1], "Input arrays do not have the same number of inputs" return x1, n1, x2, n2, params, D def calc_r2(self, x1, x2, params): r""" Calculate squared distance between all pairs of points This method computes the scaled Euclidean distance between all pairs of points in ``x1`` and ``x2``. For example, if ``x1 = [1.]``, ``x2`` = [2.], and ``params = [2.]`` then ``calc_r`` would return :math:`{\sqrt{exp(2)*(1 - 2)^2}=\sqrt{exp(2)}}` as an array with shape ``(1,1)``. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding all pair-wise squared distances between points in arrays ``x1`` and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) exp_theta = np.exp(params)[0] r2_matrix = np.sum(exp_theta*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2, axis=-1) if np.any(np.isinf(r2_matrix)): raise FloatingPointError("Inf enountered in kernel distance computation") return r2_matrix def calc_dr2dtheta(self, x1, x2, params): r""" Calculate the first derivative of the distance between all pairs of points with respect to the hyperparameters This method computes the derivative of the scaled Euclidean distance between all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters. The gradient is held in an array with shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in the computation of the gradient and Hessian of the kernel. The first index represents the different derivatives with respect to each hyperparameter. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. 
Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the derivative of the pair-wise distances between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first axis indicates the different derivative components (i.e. the derivative with respect to the first parameter is [0,:,:], etc.) :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) return np.reshape(self.calc_r2(x1, x2, params), (1, n1, n2)) def calc_d2r2dtheta2(self, x1, x2, params): r""" Calculate all second derivatives of the distance between all pairs of points with respect to the hyperparameters This method computes all second derivatives of the scaled Euclidean distance between all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters. The gradient is held in an array with shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in the computation of the gradient and Hessian of the kernel. The first two indices represents the different derivatives with respect to each hyperparameter. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the second derivatives of the pair-wise distances between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first two axes indicates the different derivative components (i.e. the second derivative with respect to the first parameter is [0,0,:,:], the mixed partial with respect to the first and second parameters is [0,1,:,:] or [1,0,:,:], etc.) :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) return np.reshape(self.calc_r2(x1, x2, params), (1, 1, n1, n2)) class StationaryKernel(KernelBase): r""" Generic class representing a stationary kernel This base class implements the necessary scaffolding for defining a stationary kernel. Stationary kernels are only dependent on a distance measure between any two points, so the base class holds all the necessary information for doing the distance computation. Individual subclasses will implement the functional dependence of the kernel on the distance, plus first and second derivatives (if desired) to compute the gradient or Hessian of the kernel with respect to the hyperparameters. This implementation uses a scaled euclidean distance metric. 
Each individual parameter has a hyperparameter scale associated with it that is used in the distance computation. If a different metric is to be defined, a new base class needs to be defined that implements the ``calc_r``, and optionally ``calc_drdtheta`` and ``calc_d2rdtheta2`` methods if gradient or Hessian computation is desired. The methods ``kernel_f``, ``kernel_gradient``, and ``kernel_hessian`` can then be used to compute the appropriate quantities with no further modification. Note that the Kernel object just collates all of the methods together; the class itself does not hold any information on the data point or hyperparamters, which are passed directly to the appropriate methods. Thus, no information needs to be provided when creating a new ``Kernel`` instance. """ def calc_r2(self, x1, x2, params): r""" Calculate squared distance between all pairs of points This method computes the scaled Euclidean distance between all pairs of points in ``x1`` and ``x2``. Each component distance is multiplied by the exponential of the corresponding hyperparameter, prior to summing and taking the square root. For example, if ``x1 = [1.]``, ``x2`` = [2.], and ``params = [2., 2.]`` then ``calc_r`` would return :math:`{\sqrt{exp(2)*(1 - 2)^2}=\sqrt{exp(2)}}` as an array with shape ``(1,1)``. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding all pair-wise squared distances between points in arrays ``x1`` and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) exp_theta = np.exp(params) r2_matrix = np.sum(exp_theta*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2, axis=-1) if np.any(np.isinf(r2_matrix)): raise FloatingPointError("Inf enountered in kernel distance computation") return r2_matrix def calc_dr2dtheta(self, x1, x2, params): r""" Calculate the first derivative of the distance between all pairs of points with respect to the hyperparameters This method computes the derivative of the scaled Euclidean distance between all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters. The gradient is held in an array with shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in the computation of the gradient and Hessian of the kernel. The first index represents the different derivatives with respect to each hyperparameter. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. 
``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the derivative of the pair-wise distances between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first axis indicates the different derivative components (i.e. the derivative with respect to the first parameter is [0,:,:], etc.) :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) exp_theta = np.exp(params) dr2dtheta = np.transpose(exp_theta*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2, (2, 0, 1)) return dr2dtheta def calc_d2r2dtheta2(self, x1, x2, params): r""" Calculate all second derivatives of the distance between all pairs of points with respect to the hyperparameters This method computes all second derivatives of the scaled Euclidean distance between all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters. The gradient is held in an array with shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in the computation of the gradient and Hessian of the kernel. The first two indices represents the different derivatives with respect to each hyperparameter. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the second derivatives of the pair-wise distances between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first two axes indicates the different derivative components (i.e. the second derivative with respect to the first parameter is [0,0,:,:], the mixed partial with respect to the first and second parameters is [0,1,:,:] or [1,0,:,:], etc.) 
        :rtype: ndarray
        """

        x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)

        d2r2dtheta2 = np.zeros((D, D, n1, n2))

        idx = np.arange(D)

        d2r2dtheta2[idx, idx] = self.calc_dr2dtheta(x1, x2, params)

        return d2r2dtheta2


class ProductKernel(KernelBase):
    "Product form of kernel"

    def calc_r2(self, x1, x2, params):
        r"""
        Calculate squared distance between all pairs of points

        This method computes the scaled Euclidean distance between all pairs of points
        in ``x1`` and ``x2`` along each axis. Each component distance is multiplied by
        the exponential of the corresponding hyperparameter. For example, if
        ``x1 = [[1., 2.]]``, ``x2 = [[2., 4.]]``, and ``params = [2., 2.]`` then
        ``calc_r2`` would return the array :math:`{[exp(2)*(1 - 2)^2, exp(2)*(2 - 4)^2]}`
        as an array with shape ``(1, 1, 2)``.

        :param x1: First input array. Must be a 1-D or 2-D array, with the length of
                   the last dimension matching the last dimension of ``x2`` and one
                   less than the length of ``params``. ``x1`` may be 1-D if either
                   each point consists of a single parameter (and ``params`` has
                   length 2) or the array only contains a single point (in which case,
                   the array will be reshaped to ``(1, D)``).
        :type x1: array-like
        :param x2: Second input array. The same restrictions that apply to ``x1`` also
                   apply here.
        :type x2: array-like
        :param params: Hyperparameter array. Must be 1-D with length one greater than
                       the last dimension of ``x1`` and ``x2``.
        :type params: array-like
        :returns: Array holding all pair-wise squared distances between points in
                  arrays ``x1`` and ``x2``. Will be an array with shape
                  ``(n1, n2, D)``, where ``D`` is the number of dimensions, ``n1`` is
                  the length of the first axis of ``x1`` and ``n2`` is the length of
                  the first axis of ``x2``.
        :rtype: ndarray
        """

        x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)

        exp_theta = np.exp(params)

        r2_matrix = exp_theta[np.newaxis, np.newaxis, :]*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2

        if np.any(np.isinf(r2_matrix)):
            raise FloatingPointError("Inf encountered in kernel distance computation")

        return r2_matrix

    def kernel_f(self, x1, x2, params):
        r"""
        Compute kernel values for a set of inputs

        Returns the value of the kernel for two sets of input points and a choice of
        hyperparameters. This function should not need to be modified for different
        choices of the kernel function or distance metric, as after checking the
        inputs it simply calls the routine to compute the distance metric and then
        evaluates the kernel function for those distances.

        :param x1: First input array. Must be a 1-D or 2-D array, with the length of
                   the last dimension matching the last dimension of ``x2`` and one
                   less than the length of ``params``. ``x1`` may be 1-D if either
                   each point consists of a single parameter (and ``params`` has
                   length 2) or the array only contains a single point (in which case,
                   the array will be reshaped to ``(1, D - 1)``).
        :type x1: array-like
        :param x2: Second input array. The same restrictions that apply to ``x1`` also
                   apply here.
        :type x2: array-like
        :param params: Hyperparameter array. Must be 1-D with length one greater than
                       the last dimension of ``x1`` and ``x2``.
        :type params: array-like
        :returns: Array holding all kernel values between points in arrays ``x1`` and
                  ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1`` is
                  the length of the first axis of ``x1`` and ``n2`` is the length of
                  the first axis of ``x2``.
:rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) return np.prod(self.calc_K(self.calc_r2(x1, x2, params)), axis=-1) def kernel_deriv(self, x1, x2, params): r""" Compute kernel gradient for a set of inputs Returns the value of the kernel gradient for two sets of input points and a choice of hyperparameters. This function should not need to be modified for different choices of the kernel function or distance metric, as after checking the inputs it simply calls the routine to compute the distance metric, kernel function, and the appropriate derivative functions of the distance and kernel functions. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the gradient of the kernel function between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first axis indicates the different derivative components (i.e. the derivative with respect to the first parameter is [0,:,:], etc.) :rtype: ndarray """ x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params) r2_matrix = self.calc_r2(x1, x2, params) diag = self.calc_dKdr2(r2_matrix)*r2_matrix dKdtheta = np.broadcast_to(self.calc_K(r2_matrix), (D, n1, n2, D)).copy() idx = np.arange(0, D, 1) dKdtheta[idx, :, :, idx] = np.transpose(diag, (2, 0, 1)) return np.prod(dKdtheta, axis=-1) def kernel_hessian(self, x1, x2, params): r""" Calculate the Hessian of the kernel evaluated for all pairs of points with respect to the hyperparameters Returns the value of the kernel Hessian for two sets of input points and a choice of hyperparameters. This function should not need to be modified for different choices of the kernel function or distance metric, as after checking the inputs it simply calls the routine to compute the distance metric, kernel function, and the appropriate derivative functions of the distance and kernel functions. :param x1: First input array. Must be a 1-D or 2-D array, with the length of the last dimension matching the last dimension of ``x2`` and one less than the length of ``params``. ``x1`` may be 1-D if either each point consists of a single parameter (and ``params`` has length 2) or the array only contains a single point (in which case, the array will be reshaped to ``(1, D - 1)``). :type x1: array-like :param x2: Second input array. The same restrictions that apply to ``x1`` also apply here. :type x2: array-like :param params: Hyperparameter array. Must be 1-D with length one greater than the last dimension of ``x1`` and ``x2``. :type params: array-like :returns: Array holding the Hessian of the pair-wise distances between points in arrays ``x1`` and ``x2`` with respect to the hyperparameters. 
        Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length of
        ``params``, ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the
        length of the first axis of ``x2``. The first two axes indicate the different
        derivative components (i.e. the second derivative with respect to the first
        parameter is [0,0,:,:], the mixed partial with respect to the first and second
        parameters is [0,1,:,:] or [1,0,:,:], etc.)
        :rtype: ndarray
        """

        x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)

        r2_matrix = self.calc_r2(x1, x2, params)
        off_diag = self.calc_dKdr2(r2_matrix)*r2_matrix
        diag = self.calc_d2Kdr22(r2_matrix)*r2_matrix**2 + self.calc_dKdr2(r2_matrix)*r2_matrix

        d2Kdtheta2 = np.broadcast_to(self.calc_K(r2_matrix), (D, D, n1, n2, D)).copy()

        idx = np.arange(0, D, 1)

        d2Kdtheta2[idx, :, :, :, idx] = np.transpose(off_diag[np.newaxis, :, :, :], (0, 3, 1, 2))
        d2Kdtheta2[:, idx, :, :, idx] = np.transpose(off_diag[np.newaxis, :, :, :], (0, 3, 1, 2))
        d2Kdtheta2[idx, idx, :, :, idx] = np.transpose(diag, (2, 0, 1))

        return np.prod(d2Kdtheta2, axis=-1)


class SqExpBase(object):
    r"""
    Base Implementation of the squared exponential kernel

    Class representing the spatial functions for the squared exponential kernel.
    """

    def calc_K(self, r2):
        r"""
        Compute K(r^2) for the squared exponential kernel

        This method implements the squared exponential kernel function as a function
        of squared distance. Given an array of squared distances, this function
        evaluates the kernel function of those values, returning an array of the same
        shape.

        :param r2: Array holding squared distances between all points. All values in
                   this array must be non-negative.
        :type r2: array-like
        :returns: Array holding kernel evaluations, with the same shape as the input
                  ``r2``
        :rtype: ndarray
        """

        assert np.all(r2 >= 0.), "kernel squared distances must be non-negative"

        r2 = np.array(r2)

        return np.exp(-0.5*r2)

    def calc_dKdr2(self, r2):
        r"""
        Calculate first derivative of the squared exponential kernel as a function of
        squared distance

        This method implements the first derivative of the squared exponential kernel
        function as a function of squared distance. Given an array of squared
        distances, this function evaluates the derivative function of those values,
        returning an array of the same shape.

        :param r2: Array holding squared distances between all points. All values in
                   this array must be non-negative.
        :type r2: array-like
        :returns: Array holding kernel derivatives, with the same shape as the input
                  ``r2``
        :rtype: ndarray
        """

        assert np.all(r2 >= 0.), "kernel squared distances must be non-negative"

        r2 = np.array(r2)

        return -0.5*np.exp(-0.5*r2)

    def calc_d2Kdr22(self, r2):
        r"""
        Calculate second derivative of the squared exponential kernel as a function of
        squared distance

        This method implements the second derivative of the squared exponential kernel
        function as a function of squared distance. Given an array of squared
        distances, this function evaluates the second derivative function of those
        values, returning an array of the same shape.

        :param r2: Array holding squared distances between all points. All values in
                   this array must be non-negative.
        :type r2: array-like
        :returns: Array holding kernel second derivatives, with the same shape as the
                  input ``r2``
        :rtype: ndarray
        """

        assert np.all(r2 >= 0.), "kernel squared distances must be non-negative"

        r2 =
np.array(r2)
numpy.array
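As a quick sanity check of the classes above, the mixins can be composed into a concrete squared-exponential kernel (the shipped module exposes an analogous ``SquaredExponential`` class, per the module docstring; here it is composed ad hoc for illustration) and the analytic gradient compared against finite differences:

import numpy as np

# ad-hoc composition of the pieces defined above
class SquaredExponential(SqExpBase, StationaryKernel):
    pass

kern = SquaredExponential()
x1 = np.array([[0.0, 1.0], [2.0, 0.5]])   # 2 points with 2 inputs each
x2 = np.array([[1.0, 1.0]])               # 1 point
params = np.array([0.1, -0.2])            # hyperparameters (log scale)

K = kern.kernel_f(x1, x2, params)          # shape (2, 1)
dK = kern.kernel_deriv(x1, x2, params)     # shape (2, 2, 1)

# finite-difference check of the analytic gradient
eps = 1e-6
for d in range(len(params)):
    dp = np.zeros_like(params)
    dp[d] = eps
    fd = (kern.kernel_f(x1, x2, params + dp) - kern.kernel_f(x1, x2, params - dp)) / (2. * eps)
    assert np.allclose(fd, dK[d], atol=1e-6)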
import numpy as np def plot_psresp(slopes, dt, df, suf, mean_slope, slope_error, best_parameters, statistics): """ Plot the success fraction over slopes for parameters satisfying the significance criterion and the histogram over the grid of parameters. Parameters ---------- slopes : `~numpy.ndarray` slopes of the power law model dt : `~numpy.ndarray` bin length for the light curve in units of ``t`` df : `~numpy.ndarray` bin factor for the logarithmic periodogram suf : `~numpy.ndarray` Success fraction for each model parameter mean_slope : `~float` Mean slope of the power law slope_error : `~float` Error of the mean slope best_parameters : `~numpy.ndarray` Parameters satisfying the significance criterion statistics : `~numpy.ndarray` Data used to calculate the mean slope and its error over a grid of ``dt`` and ``df`` - slope with the highest success fraction - highest success fraction - slope of the lower full width at half maximum for the success fraction distribution - slope of the higher full width at half maximum for the success fraction distribution Returns ------- fig : `~matplotlib.Figure` Figure """ import matplotlib.pyplot as plt from matplotlib import rc from matplotlib import rcParams from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm fig = plt.figure(figsize=(11, 11)) for indx in range(len(best_parameters[0])): plt.plot(slopes, suf[:, dt == dt[best_parameters[0][indx]], df == df[best_parameters[1][indx]]], label='t bin = {}, f = {}'.format(dt[best_parameters[0][indx]], df[best_parameters[1][indx]])) plt.axhline(y=0.5 * np.max(statistics[1, :, :][best_parameters]), xmin=np.min(statistics[2, :, :][best_parameters]*2/3-2/3), xmax=np.max(statistics[3, :, :][best_parameters])*2/3-2/3, color='k' ) plt.axvline(x=mean_slope, ymin=0, ymax=np.max(statistics[1, :, :][best_parameters]), color='k' ) plt.text(mean_slope + 0.01, 0.5 * np.max(statistics[1, :, :][best_parameters]) + 0.01, 'FWHM = {:.1f}'.format(slope_error)) plt.text(mean_slope + 0.01, 0.01, 'mean slope = {}'.format(mean_slope)) plt.xlabel('slope') plt.ylabel('success fraction') plt.xlim(np.min(slopes), np.max(slopes)) plt.ylim(0, 1) plt.legend() # plt.savefig('SuF', bbox_inches='tight') return fig fig = plt.figure(figsize=(11, 11)) ax = fig.gca(projection='3d') X, Y, Z = -slopes, dt, df suf_test = suf > 0.95 *
np.max(suf)
numpy.max
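The truncated line builds a boolean mask of grid cells whose success fraction is within 5% of the best one; a toy standalone version (random `suf` with hypothetical shapes, illustration only):

import numpy as np

rng = np.random.default_rng(1)
suf = rng.random((21, 4, 3))               # toy grid: (slopes, dt bins, df bins)
suf_test = suf > 0.95 * np.max(suf)        # cells within 5% of the best success fraction
best_parameters = np.argwhere(suf_test)    # index triples of the surviving cells
print(len(best_parameters), 'candidate parameter sets')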
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import tqdm
from scipy import interpolate

from mouse_detection.tracker import EuclideanDistTracker


def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data. It has
    the advantage of preserving the original shape and features of the signal
    better than other types of filtering approaches, such as moving averages
    techniques.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering. Must be less
        than `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only
        smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).

    Notes
    -----
    The Savitzky-Golay is a type of low-pass filter, particularly suited for
    smoothing noisy data. The main idea behind this approach is to make for
    each point a least-square fit with a polynomial of high order over an
    odd-sized window centered at the point.

    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)
    import matplotlib.pyplot as plt
    plt.plot(t, y, label='Noisy signal')
    plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
    plt.plot(t, ysg, 'r', label='Filtered signal')
    plt.legend()
    plt.show()

    References
    ----------
    .. [1] <NAME>, <NAME>, Smoothing and Differentiation of Data by
       Simplified Least Squares Procedures. Analytical Chemistry, 1964,
       36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, <NAME>, <NAME>, <NAME>
       Cambridge University Press ISBN-13: 9780521880688
    """
    import numpy as np
    from math import factorial

    try:
        # plain int() replaces the np.int alias removed from modern NumPy
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError as msg:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomial order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients
    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')


def write_video(filepath, shape, fps=30):
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    video_height, video_width, CHANNELS = shape
    video_filename = filepath
    writer = cv2.VideoWriter(video_filename, fourcc, fps, (video_width, video_height), isColor=True)
    return writer


def read_video(video_path, block=False, num_blocks=None, index=None):
    '''
    Read video in blocks or directly into memory; if block mode is selected,
    reads only the block given by ``index``.
    :param video_path: path to the video
    :param block: allow block reading
    :param num_blocks: number of blocks, e.g. 10
    :param index: index of the block, e.g. 2 for the third block
    :return: np.array of frames as uint8 type
    '''
    print('Reading video: ', video_path)
    cap = cv2.VideoCapture(video_path)
    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('video props: (frameCount, frameHeight, frameWidth)=', (frameCount, frameHeight, frameWidth))

    fc = 0
    ret = True
    if not block:
        buf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype('uint8'))
        while fc < frameCount and ret:
            ret, ff = cap.read()
            if ret:
                buf[fc] = ff
                fc += 1
        cap.release()
    else:
        # calculate block indices:
        block_length = frameCount // num_blocks
        a = index * block_length
        b = a + block_length
        if index == num_blocks - 1:
            b += frameCount % num_blocks
        buf = np.empty((b - a, frameHeight, frameWidth, 3), np.dtype('uint8'))
        cnt = 0
        while fc < frameCount and ret and fc < b:
            ret, frame = cap.read()
            if a <= fc < b:
                buf[cnt] = frame
                cnt += 1
            fc += 1
    return buf


class BackgroundSubtractorTH:
    def __init__(self, init_frame=None, threshold=0.93):
        self.init_frame = init_frame
        self._track_window = None
        self.threshold = threshold

    def apply(self, frame):
        if self.init_frame is not None:
            frame = frame - self.init_frame
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        value = int(self.threshold * 255)
        ret, th1 = cv2.threshold(frame_gray, value, 255, cv2.THRESH_BINARY)
        frame = np.stack([th1, th1, th1], axis=-1)
        cv2.normalize(frame, frame, 0, 255, cv2.NORM_MINMAX)
        return frame


def createBackgroundSubtractorTH(init_image=None, bkg_threshold=0.93):
    return BackgroundSubtractorTH(init_frame=init_image, threshold=bkg_threshold)


def _remove_blackchannel(img):
    w = 0.98
    miu = 0.95
    ##
    # Calculate A
    I_inv = 255.0 - img
    I_min = np.min(I_inv, axis=2)
    kernel =
np.ones((3, 3), np.float32)
numpy.ones
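`_remove_blackchannel` is cut off right after the 3x3 kernel; dark-channel-style processing typically follows such a kernel with a local minimum filter (erosion). A standalone sketch of that step, under my assumption about the intent:

import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)   # toy BGR image
I_inv = 255.0 - img
I_min = np.min(I_inv, axis=2).astype(np.float32)
kernel = np.ones((3, 3), np.float32)
dark = cv2.erode(I_min, kernel)   # per-pixel minimum over the 3x3 neighbourhood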
# encoding: utf-8 from __future__ import division import sys import os import time import datetime import pandas as pd import numpy as np import math import ast CURRENT_DIR = os.path.abspath(os.path.dirname(__file__)) ADD_PATH = "%s/../"%(CURRENT_DIR) sys.path.append(ADD_PATH) from tools.mail import MyEmail from tools.html import html_with_style DATA_PATH = "%s/../data/basic_matrix" % (CURRENT_DIR) def reviewing_data(df,send_str): if len(df) ==0: send_str += '零单' + '<br>' return send_str send_str += '审核中单量:'+str(len(df[(df['type']=='nan')])) + ' ' + '<br>' send_str += '审核中比例:'+str(100.0*len(df[(df['type']=='nan')])/len(df)) + ' ' + '<br>' + '<br>' return send_str def pass_rate(df,send_str): if len(df) == 0: send_str += '零完成单' + '<br>' return send_str out = [] concat = [] out.append((len(df),len(df[df['suggestion']=='1']),100.0*len(df[df['suggestion']=='1'])/len(df))) if len(df[df['type']=='first']) == 0: out.append((0,0,0)) else: out.append((len(df[df['type']=='first']),len(df[(df['suggestion']=='1')&(df['type']=='first')]),100.0*len(df[(df['suggestion']=='1')&(df['type']=='first')])/len(df[df['type']=='first']))) if len(df[df['type']=='regular']) == 0: out.append((0,0,0)) else: out.append((len(df[df['type']=='regular']),len(df[(df['suggestion']=='1')&(df['type']=='regular')]),100.0*len(df[(df['suggestion']=='1')&(df['type']=='regular')])/len(df[df['type']=='regular']))) if len(df[df['type']=='again']) == 0: out.append((0,0,0)) else: out.append((len(df[df['type']=='again']),len(df[(df['suggestion']=='1')&(df['type']=='again')]),100.0*len(df[(df['suggestion']=='1')&(df['type']=='again')])/len(df[df['type']=='again']))) frame = pd.DataFrame(out, index=['总通过率','新单通过率','续贷通过率','再次通过率'], columns=['订单申请数','通过数','通过率']) concat.append(frame) concat = pd.concat(concat, keys=['机审通过率'], axis=1) send_str += html_with_style(concat) + '<br>' return send_str def baseline(df,send_str): if len(df) == 0: send_str += '无新单' + '<br>' return send_str out = [] concat = [] baseline_data = [] baseline = df['baseline'].values.tolist() for i in baseline: temp = ast.literal_eval(i) if len(temp) != 0: baseline_data.extend(temp) if baseline_data == []: send_str += '无baseline' + '<br>' return send_str for i in set(baseline_data): out.append((i,baseline_data.count(i),100.0*baseline_data.count(i)/len(df))) frame = pd.DataFrame(out, columns=['baseline','个数','拒绝率']) concat.append(frame) concat = pd.concat(concat, keys=['新单baseline拒绝率'], axis=1) send_str += html_with_style(concat) + '<br>' return send_str def score(df,send_str): if len(df) == 0: send_str += '无新单' + '<br>' return send_str out = [] concat = [] temp = df['score'].values.tolist() score = [] for i in temp: try: score.append(int(i)) except: continue score = np.array(score) out.append(('>1000',sum(np.array(score)>1000),100.0*sum(np.array(score)>1000)/len(np.array(score)))) out.append(('901-1000',(sum(np.array(score)>900)-sum(np.array(score)>1000)),100.0*(sum(np.array(score)>900)-sum(np.array(score)>1000))/len(np.array(score)))) out.append(('801-900',(sum(np.array(score)>800)-sum(np.array(score)>900)),100.0*(sum(np.array(score)>800)-sum(np.array(score)>900))/len(np.array(score)))) out.append(('701-800',(sum(
np.array(score)
numpy.array
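The repeated `sum(np.array(score) > ...)` differences above tabulate score brackets by hand; `np.histogram` yields the same bracket counts in one call. An equivalent sketch with made-up integer scores:

import numpy as np

score = np.array([650, 720, 815, 905, 1010, 760])   # made-up scores
counts, _ = np.histogram(score, bins=[701, 801, 901, 1001, np.inf])
rates = 100.0 * counts / score.size
# counts -> [#701-800, #801-900, #901-1000, #>1000]; scores <= 700 are not binned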
import numpy as np from onnx import TensorProto, helper from finn.custom_op import CustomOp import finn.util.basic as util from finn.core.datatype import DataType # adapted from <NAME>'s CS231 im2col code # utilities to generate a patch matrix from a multichannel image # of shape (batches, channels, height, width) def compute_conv_output_dim(ifm_dim, k, stride, pad=0): """Returns spatial output dimension size for convolution with given params.""" return int(((ifm_dim + 2 * pad - k) / stride) + 1) def get_im2col_indices_nchw( x_shape, field_height, field_width, padding=0, stride_y=1, stride_x=1 ): """Returns im2col indices.""" # First figure out what the size of the output should be N, C, H, W = x_shape assert (H + 2 * padding - field_height) % stride_y == 0 assert (W + 2 * padding - field_width) % stride_x == 0 out_height = compute_conv_output_dim(H, field_height, stride_y, padding) out_width = compute_conv_output_dim(W, field_width, stride_x, padding) i0 = np.repeat(np.arange(field_height), field_width) i0 = np.tile(i0, C) i1 = stride_y * np.repeat(
np.arange(out_height)
numpy.arange
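A worked toy case of the index construction above (2x2 field on a 4x4 single-channel image, stride 1). The continuation of the truncated line follows the CS231n pattern the file credits, which repeats each output row index `out_width` times; that continuation is my assumption beyond the visible completion:

import numpy as np

field_height = field_width = 2
out_height = out_width = 3          # (4 + 2*0 - 2)/1 + 1
C, stride_y = 1, 1
i0 = np.tile(np.repeat(np.arange(field_height), field_width), C)   # [0 0 1 1]
i1 = stride_y * np.repeat(np.arange(out_height), out_width)        # [0 0 0 1 1 1 2 2 2]
rows = i0.reshape(-1, 1) + i1.reshape(1, -1)                       # (4, 9) patch-row index matrix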
import numpy as np import argparse from base_module import Posenet, Camnet, discriminator, Encoder from mmdgan_mh_enc import Pose_mmdgan_enc import os import random import tensorflow as tf import scipy.io as sio import logging, logging.config import sys from eval_functions import err_3dpe import ops parse = argparse.ArgumentParser() parse.add_argument("--batchsize", help= "the batch size used in training", default=128, type = int) parse.add_argument("--epochs", help="number of epochs during training", default=50, type = int) parse.add_argument("--latent_dim", help="dimension of latent space", default=1024, type = int) parse.add_argument("--latent_dim_pose", help="dimension for pose in the latent space of discriminator", default=128, type=int) parse.add_argument("--latent_dim_kcs", help="dimension for kcs in the latent space of discriminator", default=1024, type=int) parse.add_argument("--d_output_dim", help="dimension for output of discriminator", default=8, type=int) parse.add_argument("--lr", help="learning rate", default=1e-4, type=float) parse.add_argument("--architecture", help="which architeture to use[mmdgan, mmdgan_enc]", default='mmdgan_enc', type=str) parse.add_argument("--beta1", help="beta1 for adamoptimizor", default=0.5, type=float) parse.add_argument("--diter", help="the number of discriminator updates oer generator updates", default=1, type=int) parse.add_argument("--kernel", help="kernel type used in mmd[dot, mix_rbf, mix_rq]", default='mix_rq', type=str) parse.add_argument("--repro_weight", help="weight of reprojection loss", default=10.0, type=float) parse.add_argument("--cam_weight", help="weight of camera loss", default=10.0, type=float) parse.add_argument("--gp_weight", help="weight of dot kernel in mix kernel", default=0.1, type=float) parse.add_argument("--reg_weight", help="weight for regularizer", default=7.5, type=float) parse.add_argument("--dot_weight", help="weight of dot kernel in mix kernel", default=10.0, type=float) parse.add_argument("--lr_decay", help="learning rate decay rate", default=0.94, type=float) parse.add_argument("--enc_weight", help="weight of encoder", default=10.0, type=float) parse.add_argument("--sampling", help="set to true if generate samples", default=True, type=bool) parse.add_argument("--checkpoint", help="which model to load", default=0, type=int) # 931070 for gt data # 971070 for shft parse.add_argument("--num_samples", help="number of hypotheses", default=10, type=int) parse.add_argument("--datatype", help="datatype used for training [GT, SHFT, GTMJ]", default='GT', type=str) parse.add_argument("--load_path", help="specify the path to load model", default='./models', type=str) args = parse.parse_args() actions = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Photo', 'Posing', 'Purchases', 'Sitting', 'SittingDown', 'Smoking', 'Waiting', 'WalkDog', 'WalkTogether', 'Walking'] pose3d_dim = 16 * 3 pose2d_dim = 16 * 2 cam_dim = 6 lr = args.lr model_name = '{}_regweight{}_encweight{}_2D{}'.format(args.architecture, args.reg_weight, args.enc_weight, args.datatype) log_dir = 'logs_eval' if not os.path.exists(log_dir): os.makedirs(log_dir) logging.config.fileConfig('./logging.conf') logger = logging.getLogger() fileHandler = logging.FileHandler("{0}/log.txt".format(log_dir)) logger.addHandler(fileHandler) logger.info("Logs will be written to %s" % log_dir) def log_arguments(): logger.info('Command: %s', ' '.join(sys.argv)) s = '\n'.join([' {}: {}'.format(arg, getattr(args, arg)) for arg in vars(args)]) s = 'Arguments:\n' + s 
logger.info(s) log_arguments() posenet = Posenet(args.latent_dim, pose3d_dim) camnet = Camnet(args.latent_dim, cam_dim) disc = discriminator(args.latent_dim_pose, args.latent_dim_kcs, args.d_output_dim) encoder = Encoder(args.latent_dim, args.latent_dim) mmd_posenet = Pose_mmdgan_enc(posenet, camnet, disc, encoder, args.latent_dim, args.batchsize, log_dir, args.epochs, pose2d_dim, pose3d_dim, args.kernel, args.repro_weight, args.cam_weight, args.gp_weight, args.reg_weight, args.dot_weight, args.enc_weight) mmd_posenet.build_model() config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: batchsize = args.batchsize load_dir = os.path.join(args.load_path, model_name) ckpt = tf.train.get_checkpoint_state(load_dir, latest_filename="checkpoint") if args.checkpoint > 0: ckpt_name = os.path.join(os.path.join(load_dir, "checkpoint-{}".format(args.checkpoint))) else: ckpt_name = ckpt.model_checkpoint_path mmd_posenet.saver.restore(sess, ckpt_name) print('Loading model {}'.format(os.path.basename(ckpt_name))) path = 'new_data/test/2d{}_3dTEM'.format(args.datatype) path_cam = 'new_data/test/2d{}_3dCAM'.format(args.datatype) logger.info('{0:>15} {1:>30} {2:>30}'.format('Action', 'Protocol1', 'Protocol2')) val_best_all = [] valcam_best_all = [] val_zc_all = [] valcam_zc_all = [] for action in actions: data_2d_3d_test = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path, action, args.datatype)) data_cam = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path_cam, action, args.datatype)) poses2d_eval = data_2d_3d_test['poses_2d'][::64, :] poses3d_eval = data_2d_3d_test['poses_3d'][::64, :] / 1000 poses_3d_cam = data_cam['poses_3d'][::64, :] / 1000 poses_zc = [] posescam_zc = [] # generate results under zero code setting for eval in range(poses2d_eval.shape[0] // batchsize): noise_zc = np.zeros([batchsize, args.latent_dim]) poses, cam = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize], poses3d_eval[eval * batchsize: (eval + 1) * batchsize], noise_zc, lr) poses_reshape = np.reshape(poses, [poses.shape[0], 3, 16]) k = np.reshape(cam, [cam.shape[0], 2, 3]) R = ops.compute_R(k) # recover rotation matrix from camera matrix poses_cam = np.matmul(R, poses_reshape) # transfer pose from the template frame to the camera frame poses_cam_reshape = np.reshape(poses_cam, [poses_cam.shape[0], -1]) posescam_zc.append(poses_cam_reshape) poses_zc.append(poses) poses_zc = np.vstack(poses_zc) posescam_zc = np.vstack(posescam_zc) # compute the error under zero code setting val_zc = 0.0 valcam_zc = 0.0 for p in range(poses_zc.shape[0]): err_zc = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_zc[p:p + 1, :], True) errcam_zc = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_zc[p:p + 1, :], False) # scale the output according to the ratio between poses in camera frame and poses in template frame in the training set val_zc = val_zc + err_zc valcam_zc = valcam_zc + errcam_zc val_zc_all.append(err_zc) valcam_zc_all.append(errcam_zc) val_zc = val_zc / poses_zc.shape[0] valcam_zc = valcam_zc/posescam_zc.shape[0] # generate results for multiple hypotheses poses_samples_all = [] posescam_samples_all = [] R_all = [] poses_repro_all = [] for eval in range(poses2d_eval.shape[0] // batchsize): poses_samples_batch = [] posescam_samples_batch = [] poses_repro_batch = [] for i in range(args.num_samples): z_test = np.random.normal(0, 1, (batchsize, args.latent_dim)) posespred, campred = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * 
batchsize], poses3d_eval[eval * batchsize: (eval + 1) * batchsize], z_test, lr) posespred_reshape = np.reshape(posespred, [posespred.shape[0], 3, 16]) poses_samples_batch.append(posespred) k = np.reshape(campred, [campred.shape[0], 2, 3]) R = ops.compute_R(k) posespred_cam = np.matmul(R, posespred_reshape) posespred_cam_reshape = np.reshape(posespred_cam, [posespred_cam.shape[0], -1]) posescam_samples_batch.append(posespred_cam_reshape) poses_repro = np.reshape(np.matmul(k, posespred_reshape), [posespred.shape[0], -1]) poses_repro_batch.append(poses_repro) poses_samples_batch = np.stack(poses_samples_batch, axis=1) poses_samples_all.append(poses_samples_batch) posescam_samples_batch = np.stack(posescam_samples_batch,axis=1) posescam_samples_all.append(posescam_samples_batch) poses_repro_batch = np.stack(poses_repro_batch, axis=1) poses_repro_all.append(poses_repro_batch) R_all.append(R) poses_samples_all = np.concatenate(poses_samples_all, axis=0) posescam_samples_all = np.concatenate(posescam_samples_all, axis=0) poses_repro_all = np.concatenate(poses_repro_all, axis=0) R_all = np.concatenate(R_all, axis=0) # compute error for bh setting err = np.zeros([poses_samples_all.shape[0], poses_samples_all.shape[1]]) err_cam =
np.zeros([poses_samples_all.shape[0], poses_samples_all.shape[1]])
numpy.zeros
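For the multi-hypothesis evaluation that the truncated `err` arrays feed, the usual "bh" reduction keeps the best hypothesis per pose and averages; a minimal sketch with toy numbers (my assumption about the reduction, not code from this script):

import numpy as np

np.random.seed(0)
err = np.random.rand(100, 10) * 60.0   # toy (poses, hypotheses) errors in mm
best = err.min(axis=1)                 # keep the best hypothesis for each pose
print('bh error: %.2f mm' % best.mean())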
"""Control functions for WFSC.""" import numpy as np # import multiprocessing # from astropy.io import fits # import matplotlib.pyplot as plt import falco def wrapper(mp, cvar, jacStruct): """ Outermost wrapper function for all the controller functions. Parameters ---------- mp : ModelParameters Structure containing optical model parameters cvar : ModelParameters Structure containing controller variables jacStruct : ModelParameters Structure containing control Jacobians for each specified DM. Returns ------- None Changes are made by reference to mp. """ # if type(mp) is not falco.config.ModelParameters: # raise TypeError('Input "mp" must be of type ModelParameters') # pass # with falco.util.TicToc('Using the Jacobian to make other matrices'): print('Using the Jacobian to make other matrices...', end='') # Compute matrices for linear control with regular EFC cvar.GstarG_wsum = np.zeros((cvar.NeleAll, cvar.NeleAll)) cvar.RealGstarEab_wsum = np.zeros((cvar.NeleAll, 1)) for im in range(mp.jac.Nmode): Gmode = np.zeros((mp.Fend.corr.Npix, 1), dtype=complex) # Initialize a row to concatenate onto if(any(mp.dm_ind == 1)): Gmode = np.hstack((Gmode,np.squeeze(jacStruct.G1[:,:,im]))) if(any(mp.dm_ind == 2)): Gmode = np.hstack((Gmode,np.squeeze(jacStruct.G2[:,:,im]))) if(any(mp.dm_ind == 8)): Gmode = np.hstack((Gmode,np.squeeze(jacStruct.G8[:,:,im]))) if(any(mp.dm_ind == 9)): Gmode = np.hstack((Gmode,np.squeeze(jacStruct.G9[:,:,im]))) Gmode = Gmode[:, 1:] # Remove the zero column used for initialization # Square matrix part stays the same if no re-linearization has occurrred. cvar.GstarG_wsum += mp.jac.weights[im]*np.real(np.conj(Gmode).T @ Gmode) # The G^*E part changes each iteration because the E-field changes. # Apply 2-D spatial weighting to E-field in dark hole pixels. Eweighted = mp.WspatialVec*cvar.EfieldVec[:, im] # Apply the Jacobian weights and add to the total. cvar.RealGstarEab_wsum += mp.jac.weights[im]*np.real( np.conj(Gmode).T @ Eweighted.reshape(mp.Fend.corr.Npix, 1)) # Make the regularization matrix. (Define only diagonal here to save RAM.) cvar.EyeGstarGdiag = np.max(np.diag(cvar.GstarG_wsum))*np.ones(cvar.NeleAll) cvar.EyeNorm = np.max(np.diag(cvar.GstarG_wsum)) print('done.') # Call the Controller Function print('Control beginning ...') # Established, conventional controllers if mp.controller.lower() == 'plannedefc': dDM = _planned_efc(mp, cvar) elif mp.controller.lower() == 'gridsearchefc': dDM = _grid_search_efc(mp, cvar) # Update the DM commands by adding the delta control signal if(any(mp.dm_ind == 1)): mp.dm1.V += dDM.dDM1V if(any(mp.dm_ind == 2)): mp.dm2.V += dDM.dDM2V if(any(mp.dm_ind == 8)): mp.dm8.V += dDM.dDM8V if(any(mp.dm_ind == 9)): mp.dm9.V += dDM.dDM9V # Save the delta from the previous command if(any(mp.dm_ind == 1)): mp.dm1.dV = dDM.dDM1V if(any(mp.dm_ind == 2)): mp.dm2.dV = dDM.dDM2V if(any(mp.dm_ind == 8)): mp.dm8.dV = dDM.dDM8V if(any(mp.dm_ind == 9)): mp.dm9.dV = dDM.dDM9V def cull_actuators(mp, cvar, jacStruct): """ Remove weak actuators from the controlled set. Parameters ---------- mp : ModelParameters Structure containing optical model parameters cvar : ModelParameters Structure containing controller variables jacStruct : ModelParameters Structure containing control Jacobians for each specified DM. Returns ------- None Changes are made by reference to mp and jacStruct. 
""" if type(mp) is not falco.config.ModelParameters: raise TypeError('Input "mp" must be of type ModelParameters') # Reduce the number of actuators used based on their relative strength # in the Jacobian if(cvar.flagCullAct and cvar.flagRelin): print('Weeding out weak actuators from the control Jacobian...') if(any(mp.dm_ind == 1)): G1intNorm = np.sum(np.mean(np.abs(jacStruct.G1)**2, axis=2), axis=0) G1intNorm = G1intNorm/np.max(G1intNorm) mp.dm1.act_ele = np.nonzero(G1intNorm >= 10**(mp.logGmin))[0] del G1intNorm if(any(mp.dm_ind == 2)): G2intNorm = np.sum(np.mean(np.abs(jacStruct.G2)**2, axis=2), axis=0) G2intNorm = G2intNorm/np.max(G2intNorm) mp.dm2.act_ele = np.nonzero(G2intNorm >= 10**(mp.logGmin))[0] del G2intNorm if(any(mp.dm_ind == 8)): G8intNorm = np.sum(np.mean(np.abs(jacStruct.G8)**2, axis=2), axis=0) G8intNorm = G8intNorm/np.max(G8intNorm) mp.dm8.act_ele = np.nonzero(G8intNorm >= 10**(mp.logGmin))[0] del G8intNorm if(any(mp.dm_ind == 9)): G9intNorm = np.sum(np.mean(np.abs(jacStruct.G9)**2, axis=2), axis=0) G9intNorm = G9intNorm/np.max(G9intNorm) mp.dm9.act_ele = np.nonzero(G9intNorm >= 10**(mp.logGmin))[0] del G9intNorm # Add back in all actuators that are tied (to make the tied actuator # logic easier) if(any(mp.dm_ind == 1)): for ti in range(mp.dm1.tied.shape[0]): if not (any(mp.dm1.act_ele == mp.dm1.tied[ti, 0])): mp.dm1.act_ele = np.hstack([mp.dm1.act_ele, mp.dm1.tied[ti, 0]]) if not (any(mp.dm1.act_ele == mp.dm1.tied[ti, 1])): mp.dm1.act_ele = np.hstack([mp.dm1.act_ele, mp.dm1.tied[ti, 1]]) # Need to sort for the logic in model_Jacobian.m mp.dm1.act_ele = np.sort(mp.dm1.act_ele) if(any(mp.dm_ind == 2)): for ti in range(mp.dm2.tied.shape[0]): if not any(mp.dm2.act_ele == mp.dm2.tied[ti, 0]): mp.dm2.act_ele = np.hstack([mp.dm2.act_ele, mp.dm2.tied[ti, 0]]) if not any(mp.dm2.act_ele == mp.dm2.tied[ti, 1]): mp.dm2.act_ele = np.hstack([mp.dm2.act_ele, mp.dm2.tied[ti, 1]]) # Need to sort for the logic in model_Jacobian.m mp.dm2.act_ele = np.sort(mp.dm2.act_ele) # if(any(mp.dm_ind == 8)) # for ti=1:size(mp.dm8.tied,1) # if(any(mp.dm8.act_ele==mp.dm8.tied(ti,1))==false); mp.dm8.act_ele = [mp.dm8.act_ele; mp.dm8.tied(ti,1)]; end # if(any(mp.dm8.act_ele==mp.dm8.tied(ti,2))==false); mp.dm8.act_ele = [mp.dm8.act_ele; mp.dm8.tied(ti,2)]; end # end # mp.dm8.act_ele = sort(mp.dm8.act_ele); # end # if(any(mp.dm_ind == 9)) # for ti=1:size(mp.dm9.tied,1) # if(any(mp.dm9.act_ele==mp.dm9.tied(ti,1))==false); mp.dm9.act_ele = [mp.dm9.act_ele; mp.dm9.tied(ti,1)]; end # if(any(mp.dm9.act_ele==mp.dm9.tied(ti,2))==false); mp.dm9.act_ele = [mp.dm9.act_ele; mp.dm9.tied(ti,2)]; end # end # mp.dm9.act_ele = sort(mp.dm9.act_ele); # end # Update the number of elements used per DM if(any(mp.dm_ind == 1)): mp.dm1.Nele = mp.dm1.act_ele.size if(any(mp.dm_ind == 2)): mp.dm2.Nele = mp.dm2.act_ele.size if(any(mp.dm_ind == 8)): mp.dm8.Nele = mp.dm8.act_ele.size if(any(mp.dm_ind == 9)): mp.dm9.Nele = mp.dm9.act_ele.size if(any(mp.dm_ind == 1)): print(' DM1: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm1.Nele, mp.dm1.NactTotal, 100*mp.dm1.Nele/mp.dm1.NactTotal)) if(any(mp.dm_ind == 2)): print(' DM2: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm2.Nele, mp.dm2.NactTotal, 100*mp.dm2.Nele/mp.dm2.NactTotal)) if(any(mp.dm_ind == 8)): print(' DM8: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm8.Nele, mp.dm8.NactTotal, 100*mp.dm8.Nele/mp.dm8.NactTotal)) if(any(mp.dm_ind == 9)): print(' DM9: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm9.Nele, mp.dm9.NactTotal, 100*mp.dm9.Nele/mp.dm9.NactTotal)) 
# Crop out unused actuators from the control Jacobian if(any(mp.dm_ind == 1)): jacStruct.G1 = jacStruct.G1[:, mp.dm1.act_ele, :] if(any(mp.dm_ind == 2)): jacStruct.G2 = jacStruct.G2[:, mp.dm2.act_ele, :] if(any(mp.dm_ind == 8)): jacStruct.G8 = jacStruct.G8[:, mp.dm8.act_ele, :] if(any(mp.dm_ind == 9)): jacStruct.G9 = jacStruct.G9[:, mp.dm9.act_ele, :] return None def _grid_search_efc(mp, cvar): """ Perform a grid search over specified variables for the controller. Parameters ---------- mp : ModelParameters Structure containing optical model parameters cvar : ModelParameters Structure containing controller variables Returns ------- dDM : ModelParameters Structure containing the delta DM commands for each DM """ # Make all combinations of the values vals_list = [(x, y) for y in mp.ctrl.dmfacVec for x in mp.ctrl.log10regVec] Nvals = len(mp.ctrl.log10regVec) * len(mp.ctrl.dmfacVec) InormVec = np.zeros(Nvals) # Initialize # Temporarily store computed DM commands so that the best one does not have # to be re-computed if(any(mp.dm_ind == 1)): dDM1V_store = np.zeros((mp.dm1.Nact, mp.dm1.Nact, Nvals)) if(any(mp.dm_ind == 2)): dDM2V_store = np.zeros((mp.dm2.Nact, mp.dm2.Nact, Nvals)) if(any(mp.dm_ind == 8)): dDM8V_store = np.zeros((mp.dm8.NactTotal, Nvals)) if(any(mp.dm_ind == 9)): dDM9V_store = np.zeros((mp.dm9.NactTotal, Nvals)) # Empirically find the regularization value giving the best contrast # if(mp.flagParallel and mp.ctrl.flagUseModel): # # Run the controller in parallel # pool = multiprocessing.Pool(processes=mp.Nthreads) # results = [pool.apply_async(_efc, args=(ni,vals_list,mp,cvar)) for ni in np.arange(Nvals,dtype=int) ] # results_ctrl = [p.get() for p in results] # All the Jacobians in a list # pool.close() # pool.join() # # # Convert from a list to arrays: # for ni in range(Nvals): # InormVec[ni] = results_ctrl[ni][0] # if(any(mp.dm_ind == 1)): dDM1V_store[:,:,ni] = results_ctrl[ni][1].dDM1V # if(any(mp.dm_ind == 2)): dDM2V_store[:,:,ni] = results_ctrl[ni][1].dDM2V # else: for ni in range(Nvals): [InormVec[ni], dDM_temp] = _efc(ni, vals_list, mp, cvar) # delta voltage commands if(any(mp.dm_ind == 1)): dDM1V_store[:, :, ni] = dDM_temp.dDM1V if(any(mp.dm_ind == 2)): dDM2V_store[:, :, ni] = dDM_temp.dDM2V if(any(mp.dm_ind == 8)): dDM8V_store[:, ni] = dDM_temp.dDM8V if(any(mp.dm_ind == 9)): dDM9V_store[:, ni] = dDM_temp.dDM9V # Print out results to the command line print('Scaling factor:\t', end='') for ni in range(Nvals): print('%.2f\t\t' % (vals_list[ni][1]), end='') print('\nlog10reg: \t', end='') for ni in range(Nvals): print('%.1f\t\t' % (vals_list[ni][0]), end='') print('\nInorm: \t', end='') for ni in range(Nvals): print('%.2e\t' % (InormVec[ni]), end='') print('\n', end='') # Find the best scaling factor and Lagrange multiplier pair based on the # best contrast. # [cvar.cMin,indBest] = np.min(InormVec) indBest = np.argmin(InormVec) cvar.cMin = np.min(InormVec) dDM = falco.config.Object() # delta voltage commands if(any(mp.dm_ind == 1)): dDM.dDM1V = np.squeeze(dDM1V_store[:, :, indBest]) if(any(mp.dm_ind == 2)): dDM.dDM2V = np.squeeze(dDM2V_store[:, :, indBest]) if(any(mp.dm_ind == 8)): dDM.dDM8V = np.squeeze(dDM8V_store[:, indBest]) if(any(mp.dm_ind == 9)): dDM.dDM9V = np.squeeze(dDM9V_store[:, indBest]) cvar.log10regUsed = vals_list[indBest][0] dmfacBest = vals_list[indBest][1] if(mp.ctrl.flagUseModel): print('Model-based grid search expects log10reg, = %.1f,\t dmfac = %.2f,\t %4.2e normalized intensity.' 
% (cvar.log10regUsed, dmfacBest, cvar.cMin)) else: print('Empirical grid search finds log10reg, = %.1f,\t dmfac = %.2f,\t %4.2e normalized intensity.' % (cvar.log10regUsed, dmfacBest, cvar.cMin)) return dDM def _planned_efc(mp, cvar): """ Perform a scheduled/planned set of EFC iterations. Parameters ---------- mp : ModelParameters Structure containing optical model parameters cvar : ModelParameters Structure containing controller variables Returns ------- dDM : ModelParameters Structure containing the delta DM commands for each DM """ # Make all combinations of the values vals_list = [(x, y) for y in mp.ctrl.dmfacVec for x in mp.ctrl.log10regVec] Nvals = len(mp.ctrl.log10regVec) * len(mp.ctrl.dmfacVec) InormVec = np.zeros(Nvals) # Initialize # Make more obvious names for conditions: relinearizeNow = any(np.array(mp.gridSearchItrVec) == cvar.Itr) useBestLog10Reg = np.imag(mp.ctrl.log10regSchedIn[cvar.Itr]) != 0 realLog10RegIsZero = np.real(mp.ctrl.log10regSchedIn[cvar.Itr]) == 0 # Step 1: Empirically find the "optimal" regularization value # (if told to for this iteration). if relinearizeNow: # Temporarily store computed DM commands so that the best one does # not have to be re-computed if(any(mp.dm_ind == 1)): dDM1V_store = np.zeros((mp.dm1.Nact, mp.dm1.Nact, Nvals)) if(any(mp.dm_ind == 2)): dDM2V_store = np.zeros((mp.dm2.Nact, mp.dm2.Nact, Nvals)) if(any(mp.dm_ind == 8)): dDM8V_store = np.zeros((mp.dm8.NactTotal, Nvals)) if(any(mp.dm_ind == 9)): dDM9V_store = np.zeros((mp.dm9.NactTotal, Nvals)) for ni in range(Nvals): [InormVec[ni], dDM_temp] = _efc(ni, vals_list, mp, cvar) # delta voltage commands if(any(mp.dm_ind == 1)): dDM1V_store[:, :, ni] = dDM_temp.dDM1V if(any(mp.dm_ind == 2)): dDM2V_store[:, :, ni] = dDM_temp.dDM2V if(any(mp.dm_ind == 8)): dDM8V_store[:, ni] = dDM_temp.dDM8V if(any(mp.dm_ind == 9)): dDM9V_store[:, ni] = dDM_temp.dDM9V # Print out results to the command line print('Scaling factor:\t', end='') for ni in range(Nvals): print('%.2f\t\t' % (vals_list[ni][1]), end='') print('\nlog10reg: \t', end='') for ni in range(Nvals): print('%.1f\t\t' % (vals_list[ni][0]), end='') print('\nInorm: \t', end='') for ni in range(Nvals): print('%.2e\t' % (InormVec[ni]), end='') print('\n', end='') # Find the best scaling factor and Lagrange multiplier pair based on # the best contrast. # [cvar.cMin,indBest] = np.min(InormVec) indBest = np.argmin(InormVec) cvar.cMin = np.min(InormVec) cvar.latestBestlog10reg = vals_list[indBest][0] cvar.latestBestDMfac = vals_list[indBest][1] if(mp.ctrl.flagUseModel): print(('Model-based grid search expects log10reg, = %.1f,\t ' + 'dmfac = %.2f,\t %4.2e normalized intensity.') % (cvar.latestBestlog10reg, cvar.latestBestDMfac, cvar.cMin)) else: print(('Empirical grid search finds log10reg, = %.1f,\t dmfac' + ' = %.2f,\t %4.2e normalized intensity.') % (cvar.latestBestlog10reg, cvar.latestBestDMfac, cvar.cMin)) # Skip steps 2 and 3 if the schedule for this iteration is just to use the # "optimal" regularization AND if grid search was performed this iteration. 
if relinearizeNow and useBestLog10Reg and realLog10RegIsZero: # delta voltage commands dDM = falco.config.Object() # Initialize if(any(mp.dm_ind == 1)): dDM.dDM1V = np.squeeze(dDM1V_store[:, :, indBest]) if(any(mp.dm_ind == 2)): dDM.dDM2V = np.squeeze(dDM2V_store[:, :, indBest]) if(any(mp.dm_ind == 8)): dDM.dDM8V = np.squeeze(dDM8V_store[:, indBest]) if(any(mp.dm_ind == 9)): dDM.dDM9V = np.squeeze(dDM9V_store[:, indBest]) log10regSchedOut = cvar.latestBestlog10reg else: # Step 2: For this iteration in the schedule, replace the imaginary # part of the regularization with the latest "optimal" regularization if useBestLog10Reg: log10regSchedOut = cvar.latestBestlog10reg + \ np.real(mp.ctrl.log10regSchedIn[cvar.Itr]) else: log10regSchedOut = np.real(mp.ctrl.log10regSchedIn[cvar.Itr]) # Step 3: Compute the EFC command to use ni = 0 if not hasattr(cvar, 'latestBestDMfac'): cvar.latestBestDMfac = 1 vals_list = [(x, y) for y in np.array([cvar.latestBestDMfac]) for x in np.array([log10regSchedOut])] [cvar.cMin, dDM] = _efc(ni, vals_list, mp, cvar) if mp.ctrl.flagUseModel: print(('Model expects scheduled log10(reg) = %.1f\t to give ' + '%4.2e normalized intensity.') % (log10regSchedOut, cvar.cMin)) else: print(('Scheduled log10reg = %.1f\t gives %4.2e normalized' + ' intensity.') % (log10regSchedOut, cvar.cMin)) cvar.log10regUsed = log10regSchedOut return dDM def efc_schedule_generator(scheduleMatrix): """ Generate the EFC schedule from an input matrix. Parameters ---------- scheduleMatrix : array_like DESCRIPTION. Returns ------- Nitr : int Number of WFSC iterations. relinItrVec : array_like DESCRIPTION. gridSearchItrVec : array_like DESCRIPTION. log10regSched : array_like DESCRIPTION. dm_ind_sched : list DESCRIPTION. Notes ----- CONTROL SCHEDULE. Columns of sched_mat are: % Column 0: # of iterations, % Column 1: log10(regularization), % Column 2: which DMs to use (12, 128, 129, or 1289) for control % Column 3: flag (0 = false, 1 = true), whether to re-linearize % at that iteration. % Column 4: flag (0 = false, 1 = true), whether to perform an % EFC parameter grid search to find the set giving the best % contrast . % The imaginary part of the log10(regularization) in column 1 is % replaced for that iteration with the optimal log10(regularization) % A row starting with [0, 0, 0, 1...] relinearizes only at that time """ # Number of correction iterations Nitr = int(np.real(np.sum(scheduleMatrix[:, 0]))) # Create the vectors of: # 1) iteration numbers at which to relinearize the Jacobian # 2) log10(regularization) at each correction iteration relinItrVec = np.array([]) # Initialize gridSearchItrVec = [] # Initialize log10regSched = np.zeros((Nitr,), dtype=complex) # Initialize dmIndList = np.zeros((Nitr,), dtype=int) # Initialize iterCount = 0 for iRow in range(scheduleMatrix.shape[0]): # When to re-linearize if int(np.real(scheduleMatrix[iRow, 3])) == 1: relinItrVec = np.append(relinItrVec, iterCount) # When to re-do the empirical EFC grid search if int(np.real(scheduleMatrix[iRow, 4])) == 1: gridSearchItrVec.append(iterCount) # Make the vector of regularizations at each iteration deltaIter = int(np.real(scheduleMatrix[iRow, 0])) if not deltaIter == 0: log10regSched[iterCount:(iterCount+deltaIter)] = \ scheduleMatrix[iRow, 1] dmIndList[iterCount:(iterCount+deltaIter)] = \ int(np.real(scheduleMatrix[iRow, 2])) iterCount += int(np.real(scheduleMatrix[iRow, 0])) gridSearchItrVec =
np.asarray(gridSearchItrVec)
numpy.asarray
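A toy schedule matrix for `efc_schedule_generator`, following the column convention documented in its notes (iterations, log10 regularization, DM indices, relinearize flag, grid-search flag); the numbers are illustrative only:

import numpy as np

sched = np.array([
    [5, -2.0, 12, 1, 1],   # 5 iterations at log10reg=-2 on DMs 1&2; relinearize + grid search
    [5, -3.0, 12, 0, 0],   # 5 more at log10reg=-3, reusing the old Jacobian
])
# Feeding this to efc_schedule_generator gives Nitr=10, relinItrVec=[0],
# gridSearchItrVec=[0], log10reg of -2 for the first five iterations and
# -3 for the rest, and dm_ind 12 throughout.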
import numpy as np import neworder import matplotlib.pyplot as plt from matplotlib import colors class Schelling(neworder.Model): def __init__(self, timeline, gridsize, categories, similarity): # NB missing this line can cause memory corruption super().__init__(timeline, neworder.MonteCarlo.deterministic_identical_stream) # category 0 is empty cell self.ncategories = len(categories) # randomly sample initial population according to category weights init_pop = self.mc.sample(np.prod(gridsize), categories).reshape(gridsize) self.sat = np.empty(gridsize, dtype=int) self.similarity = similarity self.domain = neworder.StateGrid(init_pop, neworder.Domain.CONSTRAIN) self.fig, self.img = self.__init_visualisation() def step(self): # start with empty cells being satisfied self.sat = (self.domain.state == 0) # !count! # count all neighbours, scaling by acceptable similarity ratio n_any = self.domain.count_neighbours(lambda x: x>0) * self.similarity for c in range(1,self.ncategories): # count neighbour with a specific state n_cat = self.domain.count_neighbours(lambda x: x==c) self.sat = np.logical_or(self.sat, np.logical_and(n_cat > n_any, self.domain.state == c)) # !count! n_unsat = np.sum(~self.sat) pop = self.domain.state.copy() free = list(zip(*np.where(pop == 0))) for src in zip(*
np.where(~self.sat)
numpy.where
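The truncated loop walks the coordinates of unsatisfied agents; a self-contained illustration of the `zip(*np.where(...))` idiom on a toy grid:

import numpy as np

sat = np.array([[True, False, True],
                [False, True, False]])
for r, c in zip(*np.where(~sat)):   # (row, col) pairs of unsatisfied cells
    print(r, c)                     # 0 1, then 1 0, then 1 2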
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D Convolutional Layer with multiple output channels

Reference: http://machinelearninguru.com/computer_vision/basics/convolution/convolution_layer.html

<NAME>, 2018-12-13
"""
import matplotlib.pyplot as plt
from scipy import misc   # misc.imread requires scipy < 1.2
import numpy as np
from skimage import exposure
from math import ceil


def convolution2d(conv_input, conv_kernel, bias, strides=(1, 1), padding='same'):
    # This function takes an input (Tensor) and a kernel (Tensor)
    # and returns their convolution.
    # Args:
    # conv_input: a numpy array of size [input_height, input_width, input # of channels].
    # conv_kernel: a numpy array of size [kernel_height, kernel_width, input # of channels,
    # output # of channels] represents the kernel of the Convolutional Layer's filter.
    # bias: a numpy array of size [output # of channels], represents the bias of the Convolutional
    # Layer's filter.
    # strides: a tuple of (convolution vertical stride, convolution horizontal stride).
    # padding: type of the padding scheme: 'same' or 'valid'.
    # Returns:
    # a numpy array (convolution output).

    assert len(conv_kernel.shape) == 4, "The size of kernel should be (kernel_height, kernel_width, input # of channels, output # of channels)"
    assert len(conv_input.shape) == 3, "The size of input should be (input_height, input_width, input # of channels)"
    assert conv_kernel.shape[2] == conv_input.shape[2], "the input and the kernel should have the same depth."

    input_w, input_h = conv_input.shape[1], conv_input.shape[0]   # input_width and input_height
    kernel_w, kernel_h = conv_kernel.shape[1], conv_kernel.shape[0]   # kernel_width and kernel_height
    output_depth = conv_kernel.shape[3]

    if padding == 'same':
        output_height = int(ceil(float(input_h) / float(strides[0])))
        output_width = int(ceil(float(input_w) / float(strides[1])))

        # Calculate the number of zeros which are needed to add as padding
        pad_along_height = max((output_height - 1) * strides[0] + kernel_h - input_h, 0)
        pad_along_width = max((output_width - 1) * strides[1] + kernel_w - input_w, 0)
        pad_top = pad_along_height // 2      # amount of zero padding on the top
        pad_bottom = pad_along_height - pad_top   # amount of zero padding on the bottom
        pad_left = pad_along_width // 2      # amount of zero padding on the left
        pad_right = pad_along_width - pad_left    # amount of zero padding on the right

        output = np.zeros((output_height, output_width, output_depth))   # convolution output

        # Add zero padding to the input image; explicit end indices avoid
        # the empty slice that pad_top:-pad_bottom produces when a pad is zero
        image_padded = np.zeros((conv_input.shape[0] + pad_along_height,
                                 conv_input.shape[1] + pad_along_width,
                                 conv_input.shape[2]))
        image_padded[pad_top:pad_top + input_h, pad_left:pad_left + input_w, :] = conv_input

        for ch in range(output_depth):
            for x in range(output_width):       # Loop over every pixel of the output
                for y in range(output_height):
                    # element-wise multiplication of the kernel and the image
                    output[y, x, ch] = (conv_kernel[..., ch] *
                                        image_padded[y * strides[0]:y * strides[0] + kernel_h,
                                                     x * strides[1]:x * strides[1] + kernel_w, :]).sum() + bias[ch]

    elif padding == 'valid':
        output_height = int(ceil(float(input_h - kernel_h + 1) / float(strides[0])))
        output_width = int(ceil(float(input_w - kernel_w + 1) / float(strides[1])))

        output = np.zeros((output_height, output_width, output_depth))   # convolution output

        for ch in range(output_depth):
            for x in range(output_width):       # Loop over every pixel of the output
                for y in range(output_height):
                    # element-wise multiplication of the kernel and the image
                    output[y, x, ch] = (conv_kernel[..., ch] *
                                        conv_input[y * strides[0]:y * strides[0] + kernel_h,
                                                   x * strides[1]:x * strides[1] + kernel_w, :]).sum() + bias[ch]

    return output


# load the image
img = misc.imread('image.jpg', mode='RGB')

# The edge detection kernel
kernel1 = np.array([[-1, -1, -1],
                    [-1, 8, -1],
                    [-1, -1, -1]])[..., None]
kernel1 =
np.repeat(kernel1, 3, axis=2)
numpy.repeat
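`convolution2d` expects a kernel of shape [kernel_height, kernel_width, input channels, output channels], so the truncated lines are expanding the 2-D edge filter accordingly; a sketch of the remaining assembly (the final axis expansion and the bias are my assumptions):

import numpy as np

kernel = np.array([[-1, -1, -1],
                   [-1,  8, -1],
                   [-1, -1, -1]], dtype=float)[..., None]   # (3, 3, 1)
kernel = np.repeat(kernel, 3, axis=2)    # (3, 3, 3): same filter for each RGB channel
kernel = kernel[..., None]               # (3, 3, 3, 1): a single output channel
bias = np.zeros(1)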
import numpy as np import matplotlib.pyplot as pl from vaccontrib.covid import ( get_covid_matrices ) from vaccontrib.main import ( get_reduced_vaccinated_susceptible_contribution_matrix, get_reduced_vaccinated_susceptible_eigenvector, get_eigenvector, get_next_generation_matrix_from_matrices, get_contribution_matrix, ) from tqdm import tqdm import matplotlib.ticker as mtick import bfmplot as bp colors = [ ['#E75740', '#58BDB2'], ['#F2957D', '#268D7C'], ] uv_colors = [ colors[0][0], colors[1][1] ] reduction =
np.linspace(1,0,41)
numpy.linspace
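`reduction` sweeps a scaling factor from 1 down to 0 in 41 steps of 0.025; shown standalone:

import numpy as np

reduction = np.linspace(1, 0, 41)                  # step size 1/40 = 0.025
print(reduction[0], reduction[1], reduction[-1])   # 1.0 0.975 0.0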
from __future__ import print_function, division import os import sys root_dir = os.path.dirname(sys.path[0]) sys.path.append(root_dir) import numpy as np import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from cryoio import mrc import density, cryoops import geometry import cryoem from notimplemented import correlation import pyximport; pyximport.install( setup_args={"include_dirs": np.get_include()}, reload_support=True) import sincint def demo(N=128, rad=0.5): TtoF = sincint.gentrunctofull(N=N, rad=rad) xy, trunc_xy, truncmask = geometry.gencoords(N, 2, rad, True) print('shape of TtoF:', TtoF.shape) print('slice shape:', trunc_xy.shape[0]) trunc_slice = np.arange(trunc_xy.shape[0]) sliced_image = TtoF.dot(trunc_slice).reshape(N, N) trunc_xy_idx = np.int_(trunc_xy + int(N/2)) # Compare speed for getting slices in this way new_trunc_slice = sliced_image[trunc_xy_idx[:, 0], trunc_xy_idx[:, 1]] print('error:', sum(trunc_slice - new_trunc_slice)) pol_trunc_xy = correlation.cart2pol(trunc_xy) # inside of rad # sort trunc_xy coordinates sorted_idx = np.lexsort((pol_trunc_xy[:, 1], pol_trunc_xy[:, 0])) # lexsort; first, sort rho; second, sort theta sorted_pol_trunc_xy = pol_trunc_xy[sorted_idx] # reconstuct sorted coordinates into original state reco_pol_trunc_xy = sorted_pol_trunc_xy[sorted_idx.argsort()] print('error for reconstructed coordinates:', sum(correlation.pol2cart(reco_pol_trunc_xy) - trunc_xy)) reco_trunc_slice = trunc_slice[sorted_idx.argsort()] bingo_sliced_image = TtoF.dot(reco_trunc_slice).reshape(N, N) # outside of rad xy_outside = xy[~truncmask] sliced_image_outside_rad = np.zeros((N, N)) sliced_image_outside_rad[~truncmask.reshape(N, N)] = np.arange(xy_outside.shape[0]) pol_xy_outside = correlation.cart2pol(xy_outside) outside_sorted_idx = np.lexsort((pol_xy_outside[:, 1], pol_xy_outside[:, 0])) # lexsort; first, sort rho; second, sort theta sorted_pol_xy_outside = pol_xy_outside[outside_sorted_idx] reco_pol_xy_outside = np.arange(xy_outside.shape[0])[outside_sorted_idx.argsort()] bingo_sliced_image_outside_rad = np.zeros((N, N)) bingo_sliced_image_outside_rad[~truncmask.reshape(N, N)] = reco_pol_xy_outside fig, axes = plt.subplots(2, 2) ax = axes.flatten() ax[0].imshow(sliced_image) ax[1].imshow(bingo_sliced_image) ax[2].imshow(sliced_image_outside_rad) ax[3].imshow(bingo_sliced_image_outside_rad) plt.show() def compare_interpolation(N=128, rad=1): _, trunc_xy, _ = geometry.gencoords(N, 2, rad, True) pol_trunc_xy = correlation.cart2pol(trunc_xy) sorted_idx = np.lexsort((pol_trunc_xy[:, 1], pol_trunc_xy[:, 0])) # lexsort; first, sort rho; second, sort theta sorted_pol_trunc_xy = pol_trunc_xy[sorted_idx] interpolation = ['none', 'nearest', 'nearest_decimal_1', 'nearest_half'] fig, ax = plt.subplots(nrows=len(interpolation), sharex=True) # fig, ax = plt.subplots() def round_to(n, precision): # correction = 0.5 if n >= 0 else -0.5 correction = np.ones_like(n) * 0.5 correction[n < 0] = -0.5 return np.int_(n / precision + correction) * precision def round_half(n): return round_to(n, 0.5) def get_ip_func(ip_method): if 'none' == ip_method.lower(): return lambda x: x elif 'nearest' == ip_method.lower(): return np.round elif 'nearest_decimal_1' == ip_method.lower(): return lambda x: np.round(x, 1) elif 'nearest_half' == ip_method.lower(): return round_half else: raise ValueError('please input correct interpolation method.') for i, ip in enumerate(interpolation): ip_func = get_ip_func(ip) ip_pol_xy = ip_func(sorted_pol_trunc_xy[:, 0]) unique_value, unique_index, 
unique_inverse, unique_counts = np.unique(ip_pol_xy, return_index=True, return_inverse=True, return_counts=True) ax[i].plot(unique_value, unique_counts, label='interpolation: {}'.format(ip)) ax[i].legend(frameon=False) ax[i].set_ylabel('counts') ax[-1].set_xlabel('radius') plt.show() def correlation_trunc_example(M): N = M.shape[0] rad = 1 proj = M.sum(axis=0) FtoT = sincint.genfulltotrunc(N=N, rad=rad) TtoF = sincint.gentrunctofull(N=N, rad=rad) trunc = FtoT.dot(proj.flatten()) corr_trunc = correlation.calc_angular_correlation(trunc, N, 1) corr_proj = TtoF.dot(corr_trunc).reshape(N, N) fig, ax = plt.subplots(1,2) ax[0].imshow(proj) ax[1].imshow(corr_proj) plt.show() def view_rad_range(N=128): fig, axes = plt.subplots(4, 5, figsize=(12.8, 8)) # , sharex=True, sharey=True, squeeze=False) rad_list = np.arange(0.1, 1.1, step=0.2) for i, rad in enumerate(rad_list): TtoF = sincint.gentrunctofull(N, rad) xy, trunc_xy, truncmask = geometry.gencoords(N, 2, rad, True) N_T = trunc_xy.shape[0] trunc =
np.arange(0, N_T)
numpy.arange
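The demo above leans on the `sorted_idx.argsort()` trick to undo a lexsort; the invariant in isolation:

import numpy as np

vals = np.array([3.0, 1.0, 2.0])
order = np.argsort(vals)
restored = vals[order][order.argsort()]   # the inverse permutation undoes the sort
assert np.array_equal(restored, vals)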
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import copy import numpy from functools import reduce from pyscf import gto, lib from pyscf import scf, dft from pyscf import mp from pyscf import cc from pyscf import ao2mo from pyscf.cc import uccsd from pyscf.cc import gccsd from pyscf.cc import addons from pyscf.cc import uccsd_rdm from pyscf.fci import direct_uhf mol = gto.Mole() mol.verbose = 7 mol.output = '/dev/null' mol.atom = [ [8 , (0. , 0. , 0.)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)]] mol.basis = '631g' mol.build() rhf = scf.RHF(mol) rhf.conv_tol_grad = 1e-8 rhf.kernel() mf = scf.addons.convert_to_uhf(rhf) myucc = cc.UCCSD(mf).run(conv_tol=1e-10) mol_s2 = gto.Mole() mol_s2.atom = [ [8 , (0. , 0. , 0.)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)]] mol_s2.basis = '631g' mol_s2.spin = 2 mol_s2.verbose = 5 mol_s2.output = '/dev/null' mol_s2.build() mf_s2 = scf.UHF(mol_s2).run() eris = uccsd.UCCSD(mf_s2).ao2mo() def tearDownModule(): global mol, rhf, mf, myucc, mol_s2, mf_s2, eris mol.stdout.close() mol_s2.stdout.close() del mol, rhf, mf, myucc, mol_s2, mf_s2, eris class KnownValues(unittest.TestCase): # def test_with_df(self): # mf = scf.UHF(mol).density_fit(auxbasis='weigend').run() # mycc = cc.UCCSD(mf).run() # self.assertAlmostEqual(mycc.e_tot, -76.118403942938741, 7) def test_ERIS(self): ucc1 = cc.UCCSD(mf) nao,nmo = mf.mo_coeff[0].shape numpy.random.seed(1) mo_coeff = numpy.random.random((2,nao,nmo)) eris = cc.uccsd._make_eris_incore(ucc1, mo_coeff) self.assertAlmostEqual(lib.finger(eris.oooo), 4.9638849382825754, 11) self.assertAlmostEqual(lib.finger(eris.ovoo),-1.3623681896983584, 11) self.assertAlmostEqual(lib.finger(eris.ovov), 125.81550684442163, 11) self.assertAlmostEqual(lib.finger(eris.oovv), 55.123681017639598, 11) self.assertAlmostEqual(lib.finger(eris.ovvo), 133.48083527898248, 11) self.assertAlmostEqual(lib.finger(eris.ovvv), 59.421927525288183, 11) self.assertAlmostEqual(lib.finger(eris.vvvv), 43.556602622204778, 11) self.assertAlmostEqual(lib.finger(eris.OOOO),-407.05319440524585, 11) self.assertAlmostEqual(lib.finger(eris.OVOO), 56.284299937160796, 11) self.assertAlmostEqual(lib.finger(eris.OVOV),-287.72899895597448, 11) self.assertAlmostEqual(lib.finger(eris.OOVV),-85.484299959144522, 11) self.assertAlmostEqual(lib.finger(eris.OVVO),-228.18996145476956, 11) self.assertAlmostEqual(lib.finger(eris.OVVV),-10.715902258877399, 11) self.assertAlmostEqual(lib.finger(eris.VVVV),-89.908425473958303, 11) self.assertAlmostEqual(lib.finger(eris.ooOO),-336.65979260175226, 11) self.assertAlmostEqual(lib.finger(eris.ovOO),-16.405125847288176, 11) self.assertAlmostEqual(lib.finger(eris.ovOV), 231.59042209500075, 11) self.assertAlmostEqual(lib.finger(eris.ooVV), 20.338077193028354, 11) self.assertAlmostEqual(lib.finger(eris.ovVO), 206.48662856981386, 11) self.assertAlmostEqual(lib.finger(eris.ovVV),-71.273249852220516, 11) 
self.assertAlmostEqual(lib.finger(eris.vvVV), 172.47130671068496, 11) self.assertAlmostEqual(lib.finger(eris.OVoo),-19.927660309103977, 11) self.assertAlmostEqual(lib.finger(eris.OOvv),-27.761433381797019, 11) self.assertAlmostEqual(lib.finger(eris.OVvo),-140.09648311337384, 11) self.assertAlmostEqual(lib.finger(eris.OVvv), 40.700983950220547, 11) uccsd.MEMORYMIN, bak = 0, uccsd.MEMORYMIN ucc1.max_memory = 0 eris1 = ucc1.ao2mo(mo_coeff) uccsd.MEMORYMIN = bak self.assertAlmostEqual(abs(numpy.array(eris1.oooo)-eris.oooo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovoo)-eris.ovoo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovov)-eris.ovov).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.oovv)-eris.oovv).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovvo)-eris.ovvo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovvv)-eris.ovvv).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.vvvv)-eris.vvvv).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OOOO)-eris.OOOO).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVOO)-eris.OVOO).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVOV)-eris.OVOV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OOVV)-eris.OOVV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVVO)-eris.OVVO).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVVV)-eris.OVVV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.VVVV)-eris.VVVV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ooOO)-eris.ooOO).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovOO)-eris.ovOO).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovOV)-eris.ovOV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ooVV)-eris.ooVV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovVO)-eris.ovVO).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovVV)-eris.ovVV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.vvVV)-eris.vvVV).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVoo)-eris.OVoo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OOvv)-eris.OOvv).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVvo)-eris.OVvo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.OVvv)-eris.OVvv).max(), 0, 11) # Testing the complex MO integrals def ao2mofn(mos): if isinstance(mos, numpy.ndarray) and mos.ndim == 2: mos = [mos]*4 nmos = [mo.shape[1] for mo in mos] eri_mo = ao2mo.kernel(mf._eri, mos, compact=False).reshape(nmos) return eri_mo * 1j eris1 = cc.uccsd._make_eris_incore(ucc1, mo_coeff, ao2mofn=ao2mofn) self.assertAlmostEqual(abs(eris1.oooo.imag-eris.oooo).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ovoo.imag-eris.ovoo).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ovov.imag-eris.ovov).max(), 0, 11) self.assertAlmostEqual(abs(eris1.oovv.imag-eris.oovv).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ovvo.imag-eris.ovvo).max(), 0, 11) #self.assertAlmostEqual(abs(eris1.ovvv.imag-eris.ovvv).max(), 0, 11) #self.assertAlmostEqual(abs(eris1.vvvv.imag-eris.vvvv).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OOOO.imag-eris.OOOO).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OVOO.imag-eris.OVOO).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OVOV.imag-eris.OVOV).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OOVV.imag-eris.OOVV).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OVVO.imag-eris.OVVO).max(), 0, 11) #self.assertAlmostEqual(abs(eris1.OVVV.imag-eris.OVVV).max(), 0, 
11) #self.assertAlmostEqual(abs(eris1.VVVV.imag-eris.VVVV).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ooOO.imag-eris.ooOO).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ovOO.imag-eris.ovOO).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ovOV.imag-eris.ovOV).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ooVV.imag-eris.ooVV).max(), 0, 11) self.assertAlmostEqual(abs(eris1.ovVO.imag-eris.ovVO).max(), 0, 11) #self.assertAlmostEqual(abs(eris1.ovVV.imag-eris.ovVV).max(), 0, 11) #self.assertAlmostEqual(abs(eris1.vvVV.imag-eris.vvVV).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OVoo.imag-eris.OVoo).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OOvv.imag-eris.OOvv).max(), 0, 11) self.assertAlmostEqual(abs(eris1.OVvo.imag-eris.OVvo).max(), 0, 11) #self.assertAlmostEqual(abs(eris1.OVvv.imag-eris.OVvv).max(), 0, 11) def test_amplitudes_from_rccsd(self): e, t1, t2 = cc.RCCSD(rhf).set(conv_tol=1e-10).kernel() t1, t2 = myucc.amplitudes_from_rccsd(t1, t2) self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 6) self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 6) self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 6) self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 6) self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 6) def test_uccsd_frozen(self): ucc1 = copy.copy(myucc) ucc1.frozen = 1 self.assertEqual(ucc1.nmo, (12,12)) self.assertEqual(ucc1.nocc, (4,4)) ucc1.frozen = [0,1] self.assertEqual(ucc1.nmo, (11,11)) self.assertEqual(ucc1.nocc, (3,3)) ucc1.frozen = [[0,1], [0,1]] self.assertEqual(ucc1.nmo, (11,11)) self.assertEqual(ucc1.nocc, (3,3)) ucc1.frozen = [1,9] self.assertEqual(ucc1.nmo, (11,11)) self.assertEqual(ucc1.nocc, (4,4)) ucc1.frozen = [[1,9], [1,9]] self.assertEqual(ucc1.nmo, (11,11)) self.assertEqual(ucc1.nocc, (4,4)) ucc1.frozen = [9,10,12] self.assertEqual(ucc1.nmo, (10,10)) self.assertEqual(ucc1.nocc, (5,5)) ucc1.nmo = (13,12) ucc1.nocc = (5,4) self.assertEqual(ucc1.nmo, (13,12)) self.assertEqual(ucc1.nocc, (5,4)) def test_uccsd_frozen(self): # Freeze 1s electrons frozen = [[0,1], [0,1]] ucc = cc.UCCSD(mf_s2, frozen=frozen) ucc.diis_start_cycle = 1 ecc, t1, t2 = ucc.kernel() self.assertAlmostEqual(ecc, -0.07414978284611283, 8) def test_rdm(self): nocc = 5 nvir = 7 mol = gto.M() mf = scf.UHF(mol) mf.mo_occ = numpy.zeros((2,nocc+nvir)) mf.mo_occ[:,:nocc] = 1 mycc = uccsd.UCCSD(mf) def antisym(t2): t2 = t2 - t2.transpose(0,1,3,2) t2 = t2 - t2.transpose(1,0,2,3) return t2 orbspin = numpy.zeros((nocc+nvir)*2, dtype=int) orbspin[1::2] = 1 numpy.random.seed(1) t1 = numpy.random.random((2,nocc,nvir))*.1 - .1 t2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1 t2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1) t2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1) t2 = (t2aa,t2ab,t2bb) l1 = numpy.random.random((2,nocc,nvir))*.1 - .1 l2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1 l2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1) l2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1) l2 = (l2aa,l2ab,l2bb) dm1a, dm1b = mycc.make_rdm1(t1, t2, l1, l2) dm2aa, dm2ab, dm2bb = mycc.make_rdm2(t1, t2, l1, l2) ia = orbspin == 0 ib = orbspin == 1 oa = orbspin[:nocc*2] == 0 ob = orbspin[:nocc*2] == 1 va = orbspin[nocc*2:] == 0 vb = orbspin[nocc*2:] == 1 t1 = addons.spatial2spin(t1, orbspin) t2 = addons.spatial2spin(t2, orbspin) l1 = addons.spatial2spin(l1, orbspin) l2 = addons.spatial2spin(l2, orbspin) mf1 = scf.GHF(mol) mf1.mo_occ = numpy.zeros((nocc+nvir)*2) mf.mo_occ[:,:nocc*2] = 1 mycc1 = 
gccsd.GCCSD(mf1) dm1 = mycc1.make_rdm1(t1, t2, l1, l2) dm2 = mycc1.make_rdm2(t1, t2, l1, l2) self.assertAlmostEqual(abs(dm1[ia][:,ia]-dm1a).max(), 0, 9) self.assertAlmostEqual(abs(dm1[ib][:,ib]-dm1b).max(), 0, 9) self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ia][:,:,:,ia]-dm2aa).max(), 0, 9) self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ib][:,:,:,ib]-dm2ab).max(), 0, 9) self.assertAlmostEqual(abs(dm2[ib][:,ib][:,:,ib][:,:,:,ib]-dm2bb).max(), 0, 9) def test_h2o_rdm(self): mol = mol_s2 mf = mf_s2 mycc = uccsd.UCCSD(mf) mycc.frozen = 2 ecc, t1, t2 = mycc.kernel() l1, l2 = mycc.solve_lambda() dm1a,dm1b = mycc.make_rdm1(t1, t2, l1, l2) dm2aa,dm2ab,dm2bb = mycc.make_rdm2(t1, t2, l1, l2) mo_a = mf.mo_coeff[0] mo_b = mf.mo_coeff[1] nmoa = mo_a.shape[1] nmob = mo_b.shape[1] eriaa = ao2mo.kernel(mf._eri, mo_a, compact=False).reshape([nmoa]*4) eribb = ao2mo.kernel(mf._eri, mo_b, compact=False).reshape([nmob]*4) eriab = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b), compact=False) eriab = eriab.reshape([nmoa,nmoa,nmob,nmob]) hcore = mf.get_hcore() h1a = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a)) h1b = reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b)) e1 = numpy.einsum('ij,ji', h1a, dm1a) e1+= numpy.einsum('ij,ji', h1b, dm1b) e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2aa) * .5 e1+= numpy.einsum('ijkl,ijkl', eriab, dm2ab) e1+= numpy.einsum('ijkl,ijkl', eribb, dm2bb) * .5 e1+= mol.energy_nuc() self.assertAlmostEqual(e1, mycc.e_tot, 7) d1 = uccsd_rdm._gamma1_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2) mycc.max_memory = 0 d2 = uccsd_rdm._gamma2_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2, True) dm2 = uccsd_rdm._make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True) e1 = numpy.einsum('ij,ji', h1a, dm1a) e1+= numpy.einsum('ij,ji', h1b, dm1b) e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2[0]) * .5 e1+= numpy.einsum('ijkl,ijkl', eriab, dm2[1]) e1+= numpy.einsum('ijkl,ijkl', eribb, dm2[2]) * .5 e1+= mol.energy_nuc() self.assertAlmostEqual(e1, mycc.e_tot, 7) def test_h4_rdm(self): mol = gto.Mole() mol.verbose = 0 mol.atom = [ ['H', ( 1.,-1. , 0. )], ['H', ( 0.,-1. ,-1. )], ['H', ( 1.,-0.5 , 0. )], ['H', ( 0.,-1. , 1. )], ] mol.charge = 2 mol.spin = 2 mol.basis = '6-31g' mol.build() mf = scf.UHF(mol).set(init_guess='1e').run(conv_tol=1e-14) ehf0 = mf.e_tot - mol.energy_nuc() mycc = uccsd.UCCSD(mf).run() mycc.solve_lambda() eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0]) eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1]) eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0], mf.mo_coeff[1], mf.mo_coeff[1]]) h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0])) h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1])) efci, fcivec = direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb), h1a.shape[0], mol.nelec) dm1ref, dm2ref = direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec) t1, t2 = mycc.t1, mycc.t2 l1, l2 = mycc.l1, mycc.l2 rdm1 = mycc.make_rdm1(t1, t2, l1, l2) rdm2 = mycc.make_rdm2(t1, t2, l1, l2) self.assertAlmostEqual(abs(dm1ref[0] - rdm1[0]).max(), 0, 6) self.assertAlmostEqual(abs(dm1ref[1] - rdm1[1]).max(), 0, 6) self.assertAlmostEqual(abs(dm2ref[0] - rdm2[0]).max(), 0, 6) self.assertAlmostEqual(abs(dm2ref[1] - rdm2[1]).max(), 0, 6) self.assertAlmostEqual(abs(dm2ref[2] - rdm2[2]).max(), 0, 6) def test_eris_contract_vvvv_t2(self): mol = gto.Mole() nocca, noccb, nvira, nvirb = 5, 4, 12, 13 nvira_pair = nvira*(nvira+1)//2 nvirb_pair = nvirb*(nvirb+1)//2 numpy.random.seed(9) t2 =
numpy.random.random((nocca,noccb,nvira,nvirb))
numpy.random.random
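For reference, a minimal standalone sketch (illustrative only, not from the pyscf suite) of the completed call: numpy.random.random takes a single shape tuple and returns uniform samples in [0, 1).

import numpy
numpy.random.seed(9)                        # same seed as the prompt above
nocca, noccb, nvira, nvirb = 5, 4, 12, 13   # dimensions defined in the prompt
t2 = numpy.random.random((nocca, noccb, nvira, nvirb))
assert t2.shape == (5, 4, 12, 13)
assert (t2 >= 0).all() and (t2 < 1).all()   # samples lie in [0, 1)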
""" Classic cart-pole system implemented by <NAME> et al. Copied from http://incompleteideas.net/sutton/book/code/pole.c permalink: https://perma.cc/C9ZM-652R """ import math import gym from gym import spaces, logger from gym.utils import seeding import numpy as np from scipy.integrate import ode g = 9.8 # gravity force_mag = 10.0 tau = 0.02 # seconds between state updates # cart m_cart = 1 # pole 1 l_1 = 1 # length m_1 = 0.1 # mass # pole 2 l_2 = 1 # length m_2 = 0.1 # mass def f(time, state, input): x = state[0] x_dot = state[1] theta_1 = state[2] theta_1_dot = state[3] theta_2 = state[4] theta_2_dot = state[5] x_dot_dot = ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2 + g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_2) + m_2 * np.cos(theta_2) - m_1 * np.cos(theta_1 - theta_2) * np.cos(theta_1) - m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1))) / (l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - l_2 * m_2 ** 2 - l_2 * m_1 ** 2 - 2 * l_2 * m_1 * m_2 - l_2 * m_1 * m_cart - l_2 * m_2 * m_cart + l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_2 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 + 2 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_2 * m_1 * m_2 * np.cos(theta_2) ** 2 - 2 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2) - 2 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \ + ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2 + g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (m_1 * np.cos(theta_1) + m_2 * np.cos(theta_1) - m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_2))) / (l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - l_1 * m_2 ** 2 - l_1 * m_1 ** 2 - 2 * l_1 * m_1 * m_2 - l_1 * m_1 * m_cart - l_1 * m_2 * m_cart + l_1 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_2) ** 2 - 2 * l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2) - 2 * l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \ - ((- m_2 * np.cos(theta_1 - theta_2) ** 2 + m_1 + m_2) *(l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2 + l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input)) / (m_1 ** 2 * np.cos(theta_1) ** 2 - m_1 * m_cart - m_2 * m_cart - 2 * m_1 * m_2 + m_2 ** 2 * np.cos(theta_1) ** 2 + m_2 ** 2 * np.cos(theta_2) ** 2 + m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - m_1 ** 2 - m_2 ** 2 + 2 * m_1 * m_2 * np.cos(theta_1) ** 2 + m_1 * m_2 * np.cos(theta_2) ** 2 + m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 - 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2) - 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) theta_1_dot_dot = ((m_1 * np.cos(theta_1) + m_2 * np.cos(theta_1) - m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_2)) * (l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2 + l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input)) \ / (l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - l_1 * m_2 ** 2 - l_1 * m_1 ** 2 - 2 * l_1 * m_1 * m_2 - l_1 * m_1 * m_cart - l_1 * m_2 * m_cart + l_1 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + 
l_1 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_2) ** 2 - 2 * l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2) - 2 * l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \ - ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2 + g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (- m_2 * np.cos(theta_2) ** 2 + m_1 + m_2 + m_cart)) \ / (l_1 ** 2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 ** 2 * m_2 ** 2 - 2 * l_1 ** 2 * m_1 * m_2 - l_1 ** 2 * m_1 * m_cart - l_1 ** 2 * m_2 * m_cart - l_1 ** 2 * m_1 ** 2 + l_1 ** 2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 ** 2 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_1 ** 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 ** 2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 ** 2 * m_1 * m_2 * np.cos(theta_2) ** 2 + l_1 ** 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 ** 2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 - 2 * l_1 ** 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2) - 2 * l_1 ** 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \ + ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2 + g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_1 - theta_2) + m_2 * np.cos(theta_1 - theta_2) + m_cart * np.cos(theta_1 - theta_2) - m_1 * np.cos(theta_1) * np.cos(theta_2) - m_2 * np.cos(theta_1) * np.cos(theta_2))) / (l_1 * l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 * l_2 * m_2 ** 2 - 2 * l_1 * l_2 * m_1 * m_2 - l_1 * l_2 * m_1 * m_cart - l_1 * l_2 * m_2 * m_cart - l_1 * l_2 * m_1 ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_1 * m_2 * np.cos(theta_2) ** 2 + l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 - 2 * l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2) - 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) theta_2_dot_dot = ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2 + g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (m_1 * np.cos(theta_1 - theta_2) + m_2 * np.cos(theta_1 - theta_2) + m_cart * np.cos(theta_1 - theta_2) - m_1 * np.cos(theta_1) * np.cos(theta_2) - m_2 * np.cos(theta_1) * np.cos(theta_2))) / (l_1 * l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 * l_2 * m_2 ** 2 - 2 * l_1 * l_2 * m_1 * m_2 - l_1 * l_2 * m_1 * m_cart - l_1 * l_2 * m_2 * m_cart - l_1 * l_2 * m_1 ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_1 * m_2 *
np.cos(theta_2)
numpy.cos
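As a quick aside (angles are made up, not from the cart-pole model): np.cos is applied elementwise to an array of radians, which is how it appears throughout the dynamics above.

import numpy as np
theta = np.array([0.0, np.pi / 2, np.pi])   # illustrative angles
print(np.cos(theta))   # ~[1.0, 6.1e-17, -1.0]; cos(pi/2) is not exactly 0 in floats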
#CSTAT+ A GPU-accelerated spatial pattern analysis algorithm for high-resolution 2D/3D hydrologic connectivity using array vectorization and convolutional neural network #Author: <NAME>, <NAME> #Department of Earth, Atmospheric and Planetary Sciences, Purdue University, 550 Stadium Mall Dr, West Lafayette, IN 47907 USA. #Email: <EMAIL>; Alternative: <EMAIL> #This is the omnidirectional version: CSTAT+/OMNI import os from osgeo import gdal import numpy as np import copy as cp from numpy import genfromtxt as gft from scipy.ndimage.measurements import label from itertools import combinations_with_replacement,product from mxnet import nd,gpu from timeit import default_timer as timer import pandas as pd #Binarize pattern def prep(expe0,threshold,NoData): #Provide threshold for High/Low, usually the depth of shallow sheetflow expe1=cp.deepcopy(expe0) expe2=cp.deepcopy(expe0) expe1[(expe1>=threshold)]=1 expe1[(expe1<threshold)]=0 expe2[(expe2==NoData)]=-1 expe2[(expe2>0)]=0 connection_structure = np.array([[1,1,1],[1,1,1],[1,1,1]]) expela, num_features =label (expe1,structure=connection_structure) expe3=expe2+expela return (expe3) def itercontrol(regions,k,bins,dibins,dibins4,binnum): #Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin co0=nd.zeros(binnum-1,gpu(0),dtype="float32") codi0=nd.zeros((4,binnum-1),gpu(0),dtype="float32") count0=nd.zeros(binnum-1,gpu(0),dtype="float32") count4=nd.zeros((4,binnum-1),gpu(0),dtype="float32") co4=nd.zeros((4,binnum-1),gpu(0),dtype="float32") bins=nd.array(bins,gpu(0)) dibins=nd.array(dibins,gpu(0)) dibins4=nd.array(dibins4,gpu(0)) if k==2: #Create segment index for the input array to meet the memory requirement imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0))) #Combinations with repeated indicies iterator=list(combinations_with_replacement(imax,2)) for i in iterator: if i[0]==i[1]: vout=distanceAA2(regions,i,binnum,dibins,dibins4) co0+=vout[0] codi0+=vout[1] count0+=vout[2] co4+=vout[3] count4+=vout[4] else: vout=distanceAA1(regions,i,binnum,dibins,dibins4) co0+=vout[0] codi0+=vout[1] count0+=vout[2] co4+=vout[3] count4+=vout[4] return (co0.asnumpy(),codi0.asnumpy(),count0.asnumpy(),co4.asnumpy(),count4.asnumpy()) elif k==1: #Create segment index for the input array to meet the memory requirement imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0))) #Combinations with repeated indicies iterator=list(combinations_with_replacement(imax,2)) for i in iterator: if i[0]==i[1]: count0+=distance2(regions,i,binnum,bins) else: count0+=distance1(regions,i,binnum,bins) return (count0.asnumpy()) else: #Unpack the tuple regions_high,regions_low=regions #Create segment index for the input array to meet the memory requirement imax_high=list(range(int(regions_high.shape[0]/broadcdp)+(regions_high.shape[0]%broadcdp!=0))) imax_low=list(range(int(regions_low.shape[0]/broadcdp)+(regions_low.shape[0]%broadcdp!=0))) #Combinations with repeated indicies iterator=list(product(imax_high,imax_low)) for i in iterator: count0+=distance11(regions_high,regions_low,i,binnum,bins) return (count0.asnumpy()) def distanceAA1(regions,i,binnum,dibins,dibins4): #Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin co0=nd.zeros(binnum-1,gpu(0),dtype="float32") codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32") count0=nd.zeros(binnum-1,gpu(0),dtype="float32") 
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32") co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32") #Calculate index coordinates and directions by chuncks a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:] b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:] a1=nd.array(a,gpu(0)) b1=nd.array(b,gpu(0)) a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2)) x1_x2=a1_b1[:,0] y1_y2=a1_b1[:,1] labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32") sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,)) ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,)) #Change 0 to 180 so it can apply sum of boolean mask without losing values sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0) #Store sum of distances co0 and histogram of directions in each range bin for p in range (0,binnum-1): booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1])) count0[p]+=nd.nansum(booleanmask) co0[p]+=nd.nansum(ldis*booleanmask) #Exclue values not in distance range bin sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0) for q in range (0,5): booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1])) codi0[q,p]+=nd.nansum(booleanmaskdi) for k in range (0,5): booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1])) ldis0=ldis*booleanmaskdi for l in range (0,binnum-1): booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1])) count4[k,l]+=nd.nansum(booleanmask) co4[k,l]+=nd.nansum(ldis0*booleanmask) codi0[0,:]+=codi0[4,:] codi0=codi0[0:4,:] count4[0,:]+=count4[4,:] count4=count4[0:4,:] co4[0,:]+=co4[4,:] co4=co4[0:4,:] return(co0,codi0,count0,co4,count4) def distanceAA2(regions,i,binnum,dibins,dibins4): #Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin co0=nd.zeros(binnum-1,gpu(0),dtype="float32") codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32") count0=nd.zeros(binnum-1,gpu(0),dtype="float32") count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32") co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32") seed=nd.zeros((1,2),gpu(0)) #Calculate index coordinates and directions by chuncks a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:] b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:] a1=nd.array(a,gpu(0)) b1=nd.array(b,gpu(0)) # print ("a1",a1,"b1",b1) for ii in range (a1.shape[0]-1): a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2)) seed=nd.concat(seed,a1_b1,dim=0) if seed.shape[0]>1: x1_x2=seed[1:,0] y1_y2=seed[1:,1] labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32") sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,)) ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,)) #Change 0 to 180 so it can apply sum of boolean mask without losing values sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0) #Store sum of distances co0 and histogram of directions in each range bin for p in range (0,binnum-1): booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1])) count0[p]+=nd.nansum(booleanmask) co0[p]+=nd.nansum(ldis*booleanmask) #Exclue values not in distance range bin sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0) for q in range (0,5): booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1])) codi0[q,p]+=nd.nansum(booleanmaskdi) for k in range (0,5): booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1])) ldis0=ldis*booleanmaskdi for l in range (0,binnum-1): booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1])) count4[k,l]+=nd.nansum(booleanmask) co4[k,l]+=nd.nansum(ldis0*booleanmask) 
codi0[0,:]+=codi0[4,:] codi0=codi0[0:4,:] count4[0,:]+=count4[4,:] count4=count4[0:4,:] co4[0,:]+=co4[4,:] co4=co4[0:4,:] return(co0,codi0,count0,co4,count4) #Full permutation distance computation def distance1(regions,i,binnum,bins): #Initiate empty array for storing the number of counted pairs in each distance range bin count0=nd.zeros(binnum-1,gpu(0),dtype="float32") #Calculate index coordinates and directions by chuncks a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:] b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:] a1=nd.array(a,gpu(0)) b1=nd.array(b,gpu(0)) a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2)) x1_x2=a1_b1[:,0] y1_y2=a1_b1[:,1] ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,)) for p in range (0,binnum-1): booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1])) count0[p]+=nd.nansum(booleanmask) return(count0) #Full permutation distance computation between different regions: high and low def distance11(regions_high,regions_low,i,binnum,bins): #Initiate empty array for storing the number of counted pairs in each distance range bin count0=nd.zeros(binnum-1,gpu(0),dtype="float32") #Calculate index coordinates and directions by chuncks a=regions_high[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions_high.shape[0]),:] b=regions_low[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions_low.shape[0]),:] a1=nd.array(a,gpu(0)) b1=nd.array(b,gpu(0)) a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2)) x1_x2=a1_b1[:,0] y1_y2=a1_b1[:,1] ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,)) for p in range (0,binnum-1): booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1])) count0[p]+=nd.nansum(booleanmask) return(count0) #Full combination distance computation def distance2(regions,i,binnum,bins): #Initiate empty array for storing the number of counted pairs in each distance range bin count0=nd.zeros(binnum-1,gpu(0),dtype="float32") seed=nd.zeros((1,2),gpu(0)) #Calculate index coordinates and directions by chuncks a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:] b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:] a1=nd.array(a,gpu(0)) b1=nd.array(b,gpu(0)) for ii in range (a1.shape[0]-1): a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2)) seed=nd.concat(seed,a1_b1,dim=0) if seed.shape[0]>1: x1_x2=seed[1:,0] y1_y2=seed[1:,1] ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,)) for p in range (0,binnum-1): booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1])) count0[p]+=nd.nansum(booleanmask) return(count0) def omni(taoh_W,mean_d,cardh_his,taoh_W4,mean_d4,binnum): #Compute OMNI OMNIW=np.zeros(binnum,dtype="float32") OMNIW4=np.zeros((4,binnum),dtype="float32") #Convert Nan to zero to avoid issues taoh_W1=np.nan_to_num(taoh_W) mean_d1=np.nan_to_num(mean_d) taoh_W41=np.nan_to_num(taoh_W4) mean_d41=np.nan_to_num(mean_d4) for j in range (binnum-1): if taoh_W1[j+1]!=0: OMNIW[0]+=(taoh_W1[j]+taoh_W1[j+1])*(mean_d1[j+1]-mean_d1[j])*0.5 for k in range (4): for l in range (binnum-1): if taoh_W41[k,l+1]!=0: OMNIW4[k,0]+=(taoh_W41[k,l]+taoh_W41[k,l+1])*(mean_d41[k,l+1]-mean_d41[k,l])*0.5 results=np.vstack((taoh_W1,mean_d1,OMNIW,cardh_his)) results4=np.vstack((taoh_W41,mean_d41,OMNIW4)) return (results,results4) def compu(flowpattern,bins,dibins,dibins4,binnum,gt): #Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin coAA=np.zeros((1,binnum-1),dtype="float32") codiAA=np.zeros((4,binnum-1),dtype="float32") 
    countAA=np.zeros(binnum-1)
    countAZ=np.zeros(binnum-1)
    count4AA=
np.zeros((4,binnum-1),dtype="float32")
numpy.zeros
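A small sketch of the allocation pattern used throughout this file (the binnum value is illustrative): np.zeros accepts a shape tuple plus a dtype string.

import numpy as np
binnum = 8                                        # illustrative only
count4AA = np.zeros((4, binnum - 1), dtype="float32")
print(count4AA.shape, count4AA.dtype)             # (4, 7) float32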
""" Reads either pickle or mat files and plots the results. -- <EMAIL> -- <EMAIL> Usage: python plotting.py --filelist <file containing list of pickle or mat file paths> python plotting.py --file <pickle or mat file path> """ from __future__ import division # pylint: disable=invalid-name # pylint: disable=redefined-builtin # pylint: disable=too-many-locals import os import pickle import argparse import warnings import matplotlib.pyplot as plt import matplotlib from scipy.io import loadmat import numpy as np matplotlib.rcParams['mathtext.fontset'] = 'custom' matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans' matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic' matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold' matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams['font.family'] = 'STIXGeneral' def rgba(red, green, blue, a): '''rgba: generates matplotlib compatible rgba values from html-style rgba values ''' return (red / 255.0, green / 255.0, blue / 255.0, a) def hex(hexstring): '''hex: generates matplotlib-compatible rgba values from html-style hex colors ''' if hexstring[0] == '#': hexstring = hexstring[1:] red = int(hexstring[:2], 16) green = int(hexstring[2:4], 16) blue = int(hexstring[4:], 16) return rgba(red, green, blue, 1.0) def transparent(red, green, blue, _, opacity=0.5): '''transparent: converts a rgba color to a transparent opacity ''' return (red, green, blue, opacity) def read_results(file_path): """reads experiment result data from a '.m' file :file_path: the path to the file :returns: a dataframe object with all the various pieces of data """ if file_path.endswith('.mat'): results = loadmat(file_path) elif file_path.endswith('.p'): with open(file_path, 'rb') as pickleF: res = pickle.load(pickleF) pickleF.close() results = {} for key in list(res.keys()): if not hasattr(res[key], '__len__'): results[key] = np.array(res[key]) elif isinstance(res[key], str): results[key] = np.array(res[key]) elif isinstance(res[key], list): results[key] = np.array(res[key]) elif isinstance(res[key], np.ndarray): val = np.zeros(res[key].shape, dtype=res[key].dtype) for idx, x in np.ndenumerate(res[key]): if isinstance(x, list): val[idx] = np.array(x) else: val[idx] = x results[key] = val else: results[key] = res[key] else: raise ValueError('Wrong file format. 
It has to be either mat or pickle file') return results def get_plot_info( meth_curr_opt_vals, cum_costs, meth_costs, grid_pts, outlier_frac, init_opt_vals ): """generates means and standard deviation for the method's output """ num_experiments = len(meth_curr_opt_vals) with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) idx = np.where(meth_curr_opt_vals == '-') if idx[0].size != 0: num_experiments = idx[0][0] outlier_low_idx = max(np.round(outlier_frac * num_experiments), 1) outlier_high_idx = min( num_experiments, int(num_experiments - np.rint(outlier_frac * num_experiments)) ) inlier_idx = np.arange(outlier_low_idx, outlier_high_idx) num_grid_pts = len(grid_pts) grid_vals = np.zeros((num_experiments, num_grid_pts)) for exp_iter in range(num_experiments): if cum_costs is None: curr_cum_costs = np.cumsum(meth_costs[exp_iter]) else: curr_cum_costs = cum_costs[exp_iter] if init_opt_vals is not None: opt_vals = np.concatenate((np.array([init_opt_vals[exp_iter]]), np.squeeze(meth_curr_opt_vals[exp_iter])), axis=0) curr_cum_costs = np.concatenate((np.array([0]), np.squeeze(curr_cum_costs)), axis=0) else: opt_vals = meth_curr_opt_vals[exp_iter] interp = np.interp(grid_pts, curr_cum_costs.flatten(), opt_vals.flatten()) grid_vals[exp_iter, :] = np.maximum.accumulate(interp) sorted_grid_vals = np.sort(grid_vals, axis=0) inlier_grid_vals = sorted_grid_vals[inlier_idx, :] def mean_and_std(arr1d): """ Returns mean and standard deviation.""" finite_arr1d = arr1d[np.isfinite(arr1d)] if finite_arr1d.size / arr1d.size >= 0.4: return np.array([np.mean(finite_arr1d), np.std(finite_arr1d) / np.sqrt(arr1d.size)]) return np.array([np.NaN] * 2) res = np.apply_along_axis(mean_and_std, 0, inlier_grid_vals) return (res[0, :], res[1, :]) def gen_curves( plot_order, plot_legends, results, x_label, y_label, plot_markers, plot_line_markers, plot_colors, x_bounds=None, outlier_frac=0.1, set_legend=True, log_y=False, log_x=False, plot_title=None, study_name=None, num_workers=None, time_distro_str=None, fill_error=False, err_bar_freq=5, plot_type='plot' ): # pylint: disable=too-many-arguments # pylint: disable=too-many-branches # pylint: disable=unused-argument # pylint: disable=unused-variable """Plots the curves given the experiment result data """ NUM_GRID_PTS = 100 NUM_ERR_BARS = 10 LINE_WIDTH = 2 num_methods, num_experiments = results['curr_opt_vals'].shape methods = [str(method).strip() for method in results['methods']] if x_bounds is None or x_bounds == []: x_bounds = [0.0,
np.asscalar(results['max_capital'])
numpy.asscalar
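Note that np.asscalar was deprecated in NumPy 1.16 and removed in 1.23; a hedged sketch of the portable equivalent via ndarray.item():

import numpy as np
max_capital = np.array([[100.0]])   # loadmat-style 1-element array, illustrative
x_upper = max_capital.item()        # -> 100.0, same result as np.asscalar(max_capital)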
# 2020.05.17 import numpy as np import copy from sklearn.cluster import KMeans from sklearn.metrics import accuracy_score from sklearn.metrics.pairwise import euclidean_distances from mylearner import myLearner class HierNode(): def __init__(self, learner, num_class, num_cluster, metric, isleaf=False, id='R'): self.learner = myLearner(learner=learner, num_class=num_class) self.kmeans = KMeans(n_clusters=num_cluster) self.num_cluster = num_cluster self.metric = metric self.isleaf = isleaf self.id = id def metric_(self, X, Y): if 'func' in self.metric.keys(): return self.metric['func'](X, Y, self.metric) if X.shape[0] < self.num_cluster * self.metric['min_num_sample']: return True return False def fit(self, X, Y): self.kmeans.fit(X) if self.metric_(X, Y) == True: self.isleaf = True if self.isleaf == True: self.learner.fit(X, Y) return self def predict(self, X): if self.isleaf == True: try: prob = self.learner.predict_proba(X) except: prob = self.learner.predict(X) return prob else: return self.kmeans.predict(X) # cluster on prob vector from learner instead of raw feature class HierNode_fancy(): def __init__(self, learner, num_class, num_cluster, metric, isleaf=False, id='R'): self.learner = myLearner(learner=learner, num_class=num_class) self.kmeans = KMeans(n_clusters=num_cluster) self.num_cluster = num_cluster self.metric = metric self.isleaf = isleaf self.id = id def metric_(self, X, Y): if 'func' in self.metric.keys(): return self.metric['func'](X, Y, self.metric) if X.shape[0] < self.num_cluster * self.metric['min_num_sample']: return True if self.learner.score(X, Y) > self.metric['purity']: return True return False def fit(self, X, Y): self.learner.fit(X, Y) try: prob = self.learner.predict_proba(X) except: prob = self.learner.predict(X) if self.metric_(X, Y) == True: self.isleaf = True if self.isleaf == False: self.kmeans.fit(prob) def predict(self, X): try: prob = self.learner.predict_proba(X) except: prob = self.learner.predict(X) if self.isleaf == True: return prob else: return self.kmeans.predict(prob) # query labels, reduce number of simple samples class HierNode_query(): def __init__(self, learner, num_class, num_cluster, metric, isleaf=False, id='R'): self.learner = myLearner(learner=learner, num_class=num_class) self.kmeans = KMeans(n_clusters=num_cluster) self.num_cluster = num_cluster self.metric = metric self.isleaf = isleaf self.Y = [] self.X = [] self.num_class = num_class self.hist = np.zeros((num_class)) self.uselearner = False self.majority = -1 self.id = id def metric_(self, X): if 'func' in self.metric.keys(): return self.metric['func'](X, Y, self.metric) if X.shape[0] < self.num_cluster * self.metric['min_num_sample']: return True l2 = euclidean_distances(X, np.mean(X, axis=0, keepdims=True)) if np.mean(l2) < self.metric['mse']: return True return False def query_(self, X, Y, i): l2 = euclidean_distances(X, self.X) if i == 0: idx = np.argmin(l2.reshape(-1)) self.Y =
np.array([Y[idx]])
numpy.array
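For context (hypothetical labels, not from the source): wrapping the scalar Y[idx] in np.array([...]) yields a 1-element array, so later concatenations against self.Y keep working.

import numpy as np
Y = np.array([2, 0, 1, 2])        # hypothetical class labels
idx = 3                           # index of the nearest stored sample
first_label = np.array([Y[idx]])  # shape (1,), ready for np.concatenate later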
import os
import sys

import numpy as np
import open3d as o3d
from pykdtree.kdtree import KDTree
from sklearn.cluster import DBSCAN

from scripts.kitti.map_builder import KittiLoader, load_poses, load_calib_matrix, SSEAnnotation, cloud_to_map


def save_debug_pcd(map_pcd: o3d.geometry.PointCloud, label_indices, label, color_indices: list):
    pcd = o3d.geometry.PointCloud()
    pcd_on_map = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.asarray(map_pcd.points)[label_indices])
    pcd_on_map.points = o3d.utility.Vector3dVector(np.asarray(map_pcd.points))
    pcd.paint_uniform_color([0, 0, 0])
    pcd_on_map.paint_uniform_color([0, 0, 0])

    colors = np.random.rand(len(color_indices), 3)
    tmp_colors = np.asarray(pcd.colors)
    tmp_map_colors = np.asarray(pcd_on_map.colors)
    for ind, color_ind in enumerate(color_indices):
        color = colors[ind]
        tmp_colors[color_ind] = color
        tmp_map_colors[label_indices[color_ind]] = color
    pcd.colors = o3d.utility.Vector3dVector(tmp_colors)
    pcd_on_map.colors = o3d.utility.Vector3dVector(tmp_map_colors)

    o3d.io.write_point_cloud(os.path.join("debug", "{}.pcd".format(label)), pcd)
    o3d.io.write_point_cloud(os.path.join("debug_map", "{}.pcd".format(label)), pcd_on_map)


def visualize_pcd_labels(pcd: o3d.geometry.PointCloud, labels: np.array, filename: str = None):
    colors = np.concatenate([np.asarray([[0, 0, 0]]), np.random.rand(np.max(labels), 3)])
    pcd_for_vis = o3d.geometry.PointCloud()
    pcd_for_vis.points = o3d.utility.Vector3dVector(np.asarray(pcd.points))
    pcd_for_vis.paint_uniform_color([0, 0, 0])
    pcd_for_vis.colors = o3d.utility.Vector3dVector(colors[labels])
    if filename is None:
        o3d.visualization.draw_geometries([pcd_for_vis])
    else:
        o3d.io.write_point_cloud(filename, pcd_for_vis)


def dbscan_labels(pcd: o3d.geometry.PointCloud, labels: np.array) -> np.array:
    unique_labels, labels_in_unique_indices = np.unique(labels, return_inverse=True)
    result_labels = np.zeros_like(labels)
    problem_counter = 0
    many_clusters = []
    cluster_sizes = []
    full_minus_one = 0
    for label_index, label in enumerate(unique_labels):
        if label == 0:
            continue
        label_indices = np.where(labels_in_unique_indices == label_index)[0]
        label_points =
np.asarray(pcd.points)
numpy.asarray
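A minimal sketch of the Open3D idiom used above (random points, illustrative): np.asarray on a Vector3dVector exposes the cloud as an (N, 3) array that supports fancy indexing.

import numpy as np
import open3d as o3d
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.random.rand(10, 3))
pts = np.asarray(pcd.points)          # (10, 3) float64 array of the points
label_indices = np.array([0, 3, 7])   # hypothetical indices for one label
subset = pts[label_indices]           # (3, 3) points belonging to that label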
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np import itertools rng = np.random.RandomState(42) def svdw(m): n = m.shape[0] assert m.shape == (n, n) u, s, vt = np.linalg.svd(m) w = u @ vt assert np.allclose(u.T @ u, np.eye(n)) assert np.allclose(w.T @ w, np.eye(n)) assert np.allclose(u @ np.diag(s) @ u.T @ w, m) return u, s, w def check_eq(msg, a, b): diff = np.abs(a - b).max() assert diff < 1e-5, (msg, diff) def svdw_jacobian(M, u, s, w): n = M.shape[0] assert M.shape == u.shape == w.shape == (n, n) v = w.T @ u dsdm = np.empty((n, n*n), dtype=M.dtype) for i in range(n): for j in range(n): for k in range(n): dsdm[i, j*n+k] = u.T[i, j] * v[k, i] dwdy = np.empty((n*n, n*n), dtype=M.dtype) dydm = np.empty_like(dwdy) dudx = np.empty_like(dwdy) dxdm = np.empty_like(dwdy) for i, j, k, l in itertools.product(range(n), range(n), range(n), range(n)): cij = u.T[i, k] * v[l, j] cji = u.T[j, k] * v[l, i] dydm[i*n+j, k*n+l] = 0 if i == j else (cij - cji) / (s[i] + s[j]) dwdy[i*n+j, k*n+l] = u[i, k] * v.T[l, j] dudx[i*n+j, k*n+l] = 0 if l != j else u[i, k] dxdm[i*n+j, k*n+l] = 0 if i == j else ( cij * s[j] + cji * s[i]) / (s[j]**2 - s[i]**2) return dudx @ dxdm, dsdm, dwdy @ dydm def svdw_jacobian_num(M, u, s, w, eps=1e-4): n = M.shape[0] assert M.shape == (n, n) dudm = np.zeros((n*n, n*n), dtype=M.dtype) dsdm =
np.zeros((n, n*n), dtype=M.dtype)
numpy.zeros
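A short sketch of the allocation (small n, illustrative): passing dtype=M.dtype keeps the Jacobian buffers in the same precision as the input matrix.

import numpy as np
n = 3
M = np.random.RandomState(42).rand(n, n).astype(np.float32)
dsdm = np.zeros((n, n * n), dtype=M.dtype)
print(dsdm.shape, dsdm.dtype)   # (3, 9) float32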
from __future__ import print_function, division, absolute_import import copy import numpy as np import skimage.draw import skimage.measure from .. import imgaug as ia from .utils import normalize_shape, project_coords # TODO functions: square(), to_aspect_ratio(), contains_point() class BoundingBox(object): """Class representing bounding boxes. Each bounding box is parameterized by its top left and bottom right corners. Both are given as x and y-coordinates. The corners are intended to lie inside the bounding box area. As a result, a bounding box that lies completely inside the image but has maximum extensions would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates are saved internally as floats. Parameters ---------- x1 : number X-coordinate of the top left of the bounding box. y1 : number Y-coordinate of the top left of the bounding box. x2 : number X-coordinate of the bottom right of the bounding box. y2 : number Y-coordinate of the bottom right of the bounding box. label : None or str, optional Label of the bounding box, e.g. a string representing the class. """ def __init__(self, x1, y1, x2, y2, label=None): """Create a new BoundingBox instance.""" if x1 > x2: x2, x1 = x1, x2 if y1 > y2: y2, y1 = y1, y2 self.x1 = x1 self.y1 = y1 self.x2 = x2 self.y2 = y2 self.label = label @property def coords(self): """Get the top-left and bottom-right coordinates as one array. Returns ------- ndarray A ``(N, 2)`` numpy array with ``N=2`` containing the top-left and bottom-right coordinates. """ arr = np.empty((2, 2), dtype=np.float32) arr[0, :] = (self.x1, self.y1) arr[1, :] = (self.x2, self.y2) return arr @property def x1_int(self): """Get the x-coordinate of the top left corner as an integer. Returns ------- int X-coordinate of the top left corner, rounded to the closest integer. """ # use numpy's round to have consistent behaviour between python # versions return int(np.round(self.x1)) @property def y1_int(self): """Get the y-coordinate of the top left corner as an integer. Returns ------- int Y-coordinate of the top left corner, rounded to the closest integer. """ # use numpy's round to have consistent behaviour between python # versions return int(np.round(self.y1)) @property def x2_int(self): """Get the x-coordinate of the bottom left corner as an integer. Returns ------- int X-coordinate of the bottom left corner, rounded to the closest integer. """ # use numpy's round to have consistent behaviour between python # versions return int(np.round(self.x2)) @property def y2_int(self): """Get the y-coordinate of the bottom left corner as an integer. Returns ------- int Y-coordinate of the bottom left corner, rounded to the closest integer. """ # use numpy's round to have consistent behaviour between python # versions return int(
np.round(self.y2)
numpy.round
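The comment in the class is worth illustrating: np.round rounds half to even (unlike Python 2's round), which is why it gives version-consistent integer pixel coordinates. A tiny sketch with made-up values:

import numpy as np
print(np.round(0.5), np.round(1.5), np.round(2.5))  # 0.0 2.0 2.0 (half to even)
y2_int = int(np.round(13.7))                        # -> 14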
import os.path from data.base_dataset import BaseDataset from data.image_folder import make_dataset import numpy as np import torch import data.exrlib as exrlib class ExrHeightDataset(BaseDataset): def __init__(self, opt): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ BaseDataset.__init__(self, opt) assert(opt.image_type == 'exr') #self.A = os.path.join(opt.dataroot, opt.phase + '_input') self.A1 = os.path.join(opt.dataroot, opt.phase + '_input_terraform') self.B = os.path.join(opt.dataroot, opt.phase + '_output') #self.A_paths = sorted(make_dataset(self.A, opt.max_dataset_size)) self.A1_paths = sorted(make_dataset(self.A1, opt.max_dataset_size)) self.B_paths = sorted(make_dataset(self.B, opt.max_dataset_size)) #self.A_size = len(self.A_paths) # get the size of dataset A self.A1_size = len(self.A1_paths) self.B_size = len(self.B_paths) # get the size of dataset B btoA = self.opt.direction == 'BtoA' input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image self.A1_test_paths = sorted(make_dataset(os.path.join(opt.dataroot, 'test_input_terraform'))) self.B_test_paths = sorted(make_dataset(os.path.join(opt.dataroot, 'test_output'))) self.A1_test_size = len(self.A1_test_paths) self.B_test_size = len(self.B_test_paths) self.input_names = np.array(["RockDetailMask.RockDetailMask", "SoftDetailMask.SoftDetailMask", "cliffs.cliffs", "height.height", "mesa.mesa", "slope.slope", "slopex.slopex", "slopez.slopez"]) self.output_names = np.array(["RockDetailMask.RockDetailMask", "SoftDetailMask.SoftDetailMask", "bedrock.bedrock", "cliffs.cliffs", "flow.flow", "flowx.flowx", "flowz.flowz", "height.height", "mesa.mesa", "sediment.sediment", "water.water"]) self.input_channels = np.array([3, 6, 7]) #height, slopex, slopez self.output_channels = np.array([7]) #height if not self.opt.compute_bounds: self.i_channels_min = np.array([[[0, -400, -400]]]) self.i_channels_max = np.array([[[824, 20, 20]]]) self.o_channels_min = np.array([[[-4]]]) self.o_channels_max = np.array([[[819]]]) return channels_min = np.array([2**16 for _ in self.input_channels]) channels_max = np.array([0 for _ in self.input_channels]) examples = 0 for A1_path in self.A1_paths: A1_img = exrlib.read_exr_float32(A1_path, list(self.input_names[self.input_channels]), 512, 512).transpose(2, 0, 1).reshape(len(self.input_channels), -1) channels_min = np.min(np.concatenate((np.expand_dims(channels_min, 1), np.expand_dims(np.min(A1_img, 1), 1)), 1), 1) channels_max = np.max(np.concatenate((np.expand_dims(channels_min, 1), np.expand_dims(np.max(A1_img, 1), 1)), 1), 1) examples += 1 if examples >= 1000: break print(channels_min) self.i_channels_min = np.expand_dims(np.expand_dims(np.array(channels_min), 1), 2) print(channels_max) self.i_channels_max = np.expand_dims(np.expand_dims(np.array(channels_max), 1), 2) channels_min = np.array([2**16 for _ in self.output_channels]) channels_max = np.array([0 for _ in self.output_channels]) examples = 0 for B_path in self.B_paths: B_img = exrlib.read_exr_float32(B_path, list(self.output_names[self.output_channels]), 512, 512).transpose(2, 0, 1).reshape(len(self.output_channels), -1) channels_min = np.min(np.concatenate((
np.expand_dims(channels_min, 1)
numpy.expand_dims
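A compact sketch of the running-minimum pattern above (three channels, made-up values): np.expand_dims turns the (C,) vector into a (C, 1) column so it can be concatenated with the per-image minima and reduced along axis 1.

import numpy as np
channels_min = np.array([3.0, -400.0, -400.0])   # illustrative per-channel minima
col = np.expand_dims(channels_min, 1)            # (3,) -> (3, 1)
both = np.concatenate((col, col), 1)             # (3, 2)
new_min = np.min(both, 1)                        # (3,) updated running minimum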
import numpy as np
from scipy.spatial.distance import pdist, squareform


def k_closest(k, V, metric='cosine'):
    """
    Performs an approximate solution to the problem of finding the closest
    group of k elements in a set of vectors.

    :param k: The number of elements in the result set.
    :param V: A 2-D array with vectors in rows for which to find the closest
        set of k vectors.
    :param metric: The metric for which to perform the distance measure.
        Possible values are the ones defined for scipy.spatial.distance.pdist.
    :return: An array of row indices for the k closest row vectors.
    """
    d = pdist(V, metric)
    D = squareform(d)
    N = D.shape[0]
    row_dists = np.zeros(N)
    neighbors = np.zeros((N, k), dtype=int)
    # For each element, calculate the sum of distances
    # to itself (=0) and its k-1 nearest neighbors.
    for i in range(0, N):
        row = D[i, :]
        # Get indices for the k closest items.
        indices =
np.argsort(row)
numpy.argsort
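The completed line presumably gets sliced down to the k smallest entries; a hedged sketch with toy distances:

import numpy as np
row = np.array([0.0, 0.9, 0.2, 0.5])  # distances from one element to all others
k = 3
indices = np.argsort(row)[:k]          # k smallest distances -> array([0, 2, 3])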
from cvxpy import *
import numpy as np
import scipy as sp
import scipy.sparse as sparse

# Discrete time model of the system (mass point with input force and friction)
# Constants #
Ts = 0.2  # sampling time (s)
M = 2     # mass (Kg)
b = 0.3   # friction coefficient (N*s/m)

Ad = sparse.csc_matrix([
    [1.0, Ts],
    [0, 1.0 - b/M*Ts]
])
Bd = sparse.csc_matrix([
    [0.0],
    [Ts/M]])

# Continuous-time matrices (just for reference)
Ac = np.array([
    [0.0, 1.0],
    [0, -b/M]]
)
Bc = np.array([
    [0.0],
    [1/M]
])

[nx, nu] = Bd.shape  # number of states and number of inputs

# Reference input and states
pref = 7.0
vref = 0.0
xref = np.array([pref, vref])  # reference state
uref = np.array([0.0])   # reference input
uinit = np.array([0.0])  # input at time step negative one - used to penalize the first delta u. Could be the same as uref.

# Constraints
xmin = np.array([-100.0, -100.0])
xmax = np.array([100.0, 100.0])

umin = np.array([-1.5])*100
umax =
np.array([1.5])
numpy.array
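For reference (same values as the prompt): np.array followed by a scalar multiply scales every element, so each bound becomes a 1-element array.

import numpy as np
umin = np.array([-1.5]) * 100   # -> array([-150.])
umax = np.array([1.5]) * 100    # -> array([ 150.])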
###############################################
# rans(eXtreme) https://arxiv.org/abs/1401.5176
###############################################
# File: ransX_tseries.py
# Author: <NAME>
# Email: <EMAIL>
# Date: December/2020
# Desc: calculates time-averages over tavg
# Usage: run ransX_tseries.py

from UTILS.PROMPI.PROMPI_data import PROMPI_ransdat
from UTILS.TSERIES.ReadParamsTseries import ReadParamsTseries
from UTILS.Errors import Errors
import numpy as np
import os
import sys


def main():
    # check python version
    if sys.version_info[0] < 3:
        print("Python " + str(sys.version_info[0]) + " is not supported. EXITING.")
        sys.exit()

    # create os independent path and read parameter file
    paramFile = os.path.join('PARAMS', 'param.tseries')
    params = ReadParamsTseries(paramFile)

    # read input parameters
    datadir = params.getForTseries('tseries')['datadir']
    endianness = params.getForTseries('tseries')['endianness']
    precision = params.getForTseries('tseries')['precision']
    dataout = params.getForTseries('tseries')['dataout']
    trange_beg = params.getForTseries('tseries')['trange_beg']
    trange_end = params.getForTseries('tseries')['trange_end']
    trange = [trange_beg, trange_end]
    tavg = params.getForTseries('tseries')['tavg']

    ransdat = [filee for filee in sorted(os.listdir(datadir)) if "ransdat" in filee]
    ransdat = [filee.replace(filee, datadir + filee) for filee in ransdat]

    filename = ransdat[0]
    ts = PROMPI_ransdat(filename, endianness, precision)

    time = []
    dt = []
    for filename in ransdat:
        print(filename)
        ts = PROMPI_ransdat(filename, endianness, precision)
        rans_tstart, rans_tend, rans_tavg = ts.rans_header()
        time.append(rans_tend)
        dt.append(rans_tavg)
        # print(rans_tend,rans_tavg)

    # convert to array
    time = np.asarray(time)
    dt =
np.asarray(dt)
numpy.asarray
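A trivial sketch of the list-to-array conversion (values made up): np.asarray copies only when needed, so it is a cheap way to finalize accumulator lists.

import numpy as np
time = np.asarray([0.0, 1.5, 3.1])   # list -> ndarray
dt = np.asarray([1.5, 1.6])
print(time.dtype, dt.shape)          # float64 (2,)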
# Contact: <NAME> [sara (dot) ferreira (at) fc (dot) up (dot) pt] # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. import cv2 import numpy as np import os import radialProfile import glob from matplotlib import pyplot as plt import pickle from scipy.interpolate import griddata import pylab as py import time import sys if(len(sys.argv) != 5): print("Not enough arguments") print("insert <dir> <features> <max_files> <output filename>") exit() dir=sys.argv[1] if os.path.isdir(dir) is False: print("this directory does not exist") exit(0) N=int(sys.argv[2]) number_iter=int(sys.argv[3]) output_filename=str(sys.argv[4])+".pkl" data= {} epsilon = 1e-8 #N = 50 y = [] error = [] #number_iter = 6200 psd1D_total = np.zeros([number_iter, N]) label_total = np.zeros([number_iter]) psd1D_org_mean = np.zeros(N) psd1D_org_std =
np.zeros(N)
numpy.zeros
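A small sketch of the accumulators being set up (the N value is illustrative, matching the commented-out default in the script): np.zeros(N) with no dtype yields float64.

import numpy as np
N = 50                         # e.g. the feature count passed on the CLI
psd1D_org_mean = np.zeros(N)   # one running-mean slot per frequency bin
psd1D_org_std = np.zeros(N)
print(psd1D_org_mean.dtype)    # float64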
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from __future__ import division import tensorflow as tf import numpy as np from scipy import stats, misc, special from tests.distributions import utils from zhusuan.distributions.multivariate import * class TestMultinomial(tf.test.TestCase): def test_init_check_shape(self): with self.test_session(use_gpu=True): with self.assertRaisesRegexp(ValueError, "should have rank"): Multinomial(tf.zeros([]), 10) def test_init_n(self): dist = Multinomial(tf.ones([2]), 10) self.assertTrue(isinstance(dist.n_categories, int)) self.assertEqual(dist.n_categories, 2) self.assertTrue(isinstance(dist.n_experiments, int)) self.assertEqual(dist.n_experiments, 10) with self.assertRaisesRegexp(ValueError, "must be positive"): _ = Multinomial(tf.ones([2]), 0) with self.test_session(use_gpu=True) as sess: logits = tf.placeholder(tf.float32, None) n_experiments = tf.placeholder(tf.int32, None) dist2 = Multinomial(logits, n_experiments) self.assertEqual( sess.run([dist2.n_categories, dist2.n_experiments], feed_dict={logits: np.ones([2]), n_experiments: 10}), [2, 10]) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "should have rank"): dist2.n_categories.eval(feed_dict={logits: 1., n_experiments: 10}) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "should be a scalar"): dist2.n_experiments.eval(feed_dict={logits: [1.], n_experiments: [10]}) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "must be positive"): dist2.n_experiments.eval(feed_dict={logits: [1.], n_experiments: 0}) def test_value_shape(self): # static dist = Multinomial(tf.placeholder(tf.float32, [None, 2]), 10) self.assertEqual(dist.get_value_shape().as_list(), [2]) # dynamic logits = tf.placeholder(tf.float32, None) dist2 = Multinomial(logits, 10) self.assertTrue(dist2._value_shape().dtype is tf.int32) with self.test_session(use_gpu=True): self.assertEqual(dist2._value_shape().eval( feed_dict={logits: np.ones([2])}).tolist(), [2]) self.assertEqual(dist._value_shape().dtype, tf.int32) def test_batch_shape(self): def _distribution(param): return Multinomial(param, 10) utils.test_batch_shape_1parameter( self, _distribution, np.zeros, is_univariate=False) def test_sample_shape(self): def _distribution(param): return Multinomial(param, 10) utils.test_1parameter_sample_shape_one_rank_less( self, _distribution, np.zeros) def test_log_prob_shape(self): def _distribution(param): return Multinomial(param, 10) def _make_samples(shape): samples = np.zeros(shape) samples = samples.reshape((-1, shape[-1])) samples[:, 0] = 1 return samples.reshape(shape) utils.test_1parameter_log_prob_shape_one_rank_less( self, _distribution, _make_samples, _make_samples) def test_value(self): with self.test_session(use_gpu=True): def _test_value(logits, n_experiments, given): logits = np.array(logits, np.float32) normalized_logits = logits - misc.logsumexp( logits, axis=-1, keepdims=True) given = np.array(given) dist = Multinomial(logits, n_experiments) log_p = dist.log_prob(given) target_log_p = np.log(misc.factorial(n_experiments)) - \ np.sum(np.log(misc.factorial(given)), -1) + \ np.sum(given * normalized_logits, -1) self.assertAllClose(log_p.eval(), target_log_p) p = dist.prob(given) target_p = np.exp(target_log_p) self.assertAllClose(p.eval(), target_p) _test_value([-50., -20., 0.], 4, [1, 0, 3]) _test_value([1., 10., 1000.], 1, [1, 0, 0]) _test_value([[2., 3., 1.], [5., 7., 4.]], 3, np.ones([3, 1, 3], dtype=np.int32)) 
_test_value([-10., 10., 20., 50.], 100, [[0, 1, 99, 100], [100, 99, 1, 0]]) def test_dtype(self): def _distribution(param, dtype=None): return Multinomial(param, 10, dtype) utils.test_dtype_1parameter_discrete(self, _distribution) with self.assertRaisesRegexp(TypeError, "n_experiments must be"): Multinomial([1., 1.], tf.placeholder(tf.float32, [])) class TestOnehotCategorical(tf.test.TestCase): def test_init_check_shape(self): with self.test_session(use_gpu=True): with self.assertRaisesRegexp(ValueError, "should have rank"): OnehotCategorical(logits=tf.zeros([])) def test_init_n_categories(self): cat = OnehotCategorical(tf.ones([10])) self.assertTrue(isinstance(cat.n_categories, int)) self.assertEqual(cat.n_categories, 10) with self.test_session(use_gpu=True): logits = tf.placeholder(tf.float32, None) cat2 = OnehotCategorical(logits) self.assertEqual( cat2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "should have rank"): cat2.n_categories.eval(feed_dict={logits: 1.}) def test_value_shape(self): # static cat = OnehotCategorical(tf.placeholder(tf.float32, [None, 10])) self.assertEqual(cat.get_value_shape().as_list(), [10]) # dynamic logits = tf.placeholder(tf.float32, None) cat2 = OnehotCategorical(logits) self.assertTrue(cat2._value_shape().dtype is tf.int32) with self.test_session(use_gpu=True): self.assertEqual(cat2._value_shape().eval( feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3]) self.assertEqual(cat._value_shape().dtype, tf.int32) def test_batch_shape(self): utils.test_batch_shape_1parameter( self, OnehotCategorical, np.zeros, is_univariate=False) def test_sample_shape(self): utils.test_1parameter_sample_shape_one_rank_less( self, OnehotCategorical, np.zeros) def test_log_prob_shape(self): def _make_samples(shape): samples = np.zeros(shape) samples = samples.reshape((-1, shape[-1])) samples[:, 0] = 1 return samples.reshape(shape) utils.test_1parameter_log_prob_shape_one_rank_less( self, OnehotCategorical, _make_samples, _make_samples) def test_value(self): with self.test_session(use_gpu=True): def _test_value(logits, given): logits = np.array(logits, np.float32) normalized_logits = logits - misc.logsumexp( logits, axis=-1, keepdims=True) given = np.array(given, np.int32) cat = OnehotCategorical(logits) log_p = cat.log_prob(tf.one_hot(given, logits.shape[-1], dtype=tf.int32)) def _one_hot(x, depth): n_elements = x.size ret = np.zeros((n_elements, depth)) ret[np.arange(n_elements), x.flat] = 1 return ret.reshape(list(x.shape) + [depth]) target_log_p = np.sum(_one_hot( given, logits.shape[-1]) * normalized_logits, -1) self.assertAllClose(log_p.eval(), target_log_p) p = cat.prob(tf.one_hot(given, logits.shape[-1], dtype=tf.int32)) target_p = np.sum(_one_hot( given, logits.shape[-1]) * np.exp(normalized_logits), -1) self.assertAllClose(p.eval(), target_p) _test_value([0.], [0, 0, 0]) _test_value([-50., -10., -50.], [0, 1, 2, 1]) _test_value([0., 4.], [[0, 1], [0, 1]]) _test_value([[2., 3., 1.], [5., 7., 4.]], np.ones([3, 1, 1], dtype=np.int32)) def test_dtype(self): utils.test_dtype_1parameter_discrete(self, OnehotCategorical) class TestDirichlet(tf.test.TestCase): def test_init_check_shape(self): with self.test_session(use_gpu=True): with self.assertRaisesRegexp(ValueError, "should have rank"): Dirichlet(alpha=tf.zeros([])) def test_init_n_categories(self): dist = Dirichlet(tf.ones([10])) self.assertTrue(isinstance(dist.n_categories, int)) self.assertEqual(dist.n_categories, 10) with 
self.assertRaisesRegexp(ValueError, "n_categories.*should be at least 2"): Dirichlet(tf.ones([3, 1])) dist2 = Dirichlet(tf.placeholder(tf.float32, [3, None])) self.assertTrue(dist2.n_categories is not None) with self.test_session(use_gpu=True): alpha = tf.placeholder(tf.float32, None) dist3 = Dirichlet(alpha) self.assertEqual( dist3.n_categories.eval(feed_dict={alpha: np.ones([10])}), 10) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "should have rank"): dist3.n_categories.eval(feed_dict={alpha: 1.}) def test_value_shape(self): # static dist = Dirichlet(tf.placeholder(tf.float32, [None, 10])) self.assertEqual(dist.get_value_shape().as_list(), [10]) # dynamic alpha = tf.placeholder(tf.float32, None) dist2 = Dirichlet(alpha) self.assertEqual(dist2.get_value_shape().as_list(), [None]) self.assertTrue(dist2._value_shape().dtype is tf.int32) with self.test_session(use_gpu=True): self.assertEqual(dist2._value_shape().eval( feed_dict={alpha: np.ones([2, 1, 3])}).tolist(), [3]) self.assertEqual(dist._value_shape().dtype, tf.int32) def test_batch_shape(self): utils.test_batch_shape_1parameter( self, Dirichlet, np.zeros, is_univariate=False) def test_sample_shape(self): utils.test_1parameter_sample_shape_one_rank_less( self, Dirichlet, np.zeros) def test_log_prob_shape(self): def _make_samples(shape): samples = np.ones(shape, dtype=np.float32) return samples / samples.sum(axis=-1, keepdims=True) # TODO: This failed with a bug in Tensorflow, waiting fix. # https://github.com/tensorflow/tensorflow/issues/8391 # _test_static([3, None], [3, 2, 1, None], [3, 2, 3]) utils.test_1parameter_log_prob_shape_one_rank_less( self, Dirichlet, np.ones, _make_samples) def test_value(self): def dirichlet_logpdf(x, alpha): # scipy's implementation of dirichlet logpdf doesn't support # batch of x, we use this modified version. 
def _lnB(alpha): return np.sum(special.gammaln(alpha)) - \ special.gammaln(np.sum(alpha)) lnB = _lnB(alpha) return - lnB + np.sum(np.log(x) * (alpha - 1), -1) def dirichlet_pdf(x, alpha): return np.exp(dirichlet_logpdf(x, alpha)) with self.test_session(use_gpu=True): def _test_value_alpha_rank1(alpha, given): alpha = np.array(alpha, np.float32) given = np.array(given, np.float32) dist = Dirichlet(alpha) log_p = dist.log_prob(given) target_log_p = dirichlet_logpdf(given, alpha) self.assertAllClose(log_p.eval(), target_log_p) p = dist.prob(given) target_p = dirichlet_pdf(given, alpha) self.assertAllClose(p.eval(), target_p) _test_value_alpha_rank1([1., 1., 1.], [[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]]) _test_value_alpha_rank1([2., 3., 4.], [0.3, 0.7, 0.]) # TODO: fix for case when alpha=1, given=0 def _test_value_alpha_rank2_given_rank2(alpha, given): alpha = np.array(alpha, np.float32) given = np.array(given, np.float32) alpha_b = alpha * np.ones_like(given) given_b = given * np.ones_like(alpha) dist = Dirichlet(alpha) log_p = dist.log_prob(given) target_log_p = np.array( [dirichlet_logpdf(given_b[i], alpha_b[i]) for i in range(alpha_b.shape[0])]) self.assertAllClose(log_p.eval(), target_log_p) p = dist.prob(given) target_p = np.array( [dirichlet_pdf(given_b[i], alpha_b[i]) for i in range(alpha_b.shape[0])]) self.assertAllClose(p.eval(), target_p) _test_value_alpha_rank2_given_rank2([[1., 2.], [3., 4.]], [0.5, 0.5]) _test_value_alpha_rank2_given_rank2([[5., 6.], [7., 8.]], [[0.1, 0.9]]) _test_value_alpha_rank2_given_rank2([[100., 1.], [0.01, 10.]], [[0., 1.], [1., 0.]]) def test_check_numerics(self): alpha = tf.placeholder(tf.float32, None) given = tf.placeholder(tf.float32, None) dist = Dirichlet(alpha, check_numerics=True) log_p = dist.log_prob(given) with self.test_session(use_gpu=True): with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "log\(given\).*Tensor had Inf"): log_p.eval(feed_dict={alpha: np.ones([2]), given: [0., 1.]}) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "lbeta\(alpha\).*Tensor had NaN"): log_p.eval(feed_dict={alpha: [-1., 1.], given: [0.5, 0.5]}) def test_dtype(self): utils.test_dtype_1parameter_continuous(self, Dirichlet) class TestExpConcrete(tf.test.TestCase): def test_init_check_shape(self): with self.test_session(use_gpu=True): with self.assertRaisesRegexp(ValueError, "should have rank"): ExpConcrete(1., logits=tf.zeros([])) def test_init_n_categories(self): con = ExpConcrete(1., tf.ones([10])) self.assertTrue(isinstance(con.n_categories, int)) self.assertEqual(con.n_categories, 10) with self.test_session(use_gpu=True): logits = tf.placeholder(tf.float32, None) con2 = ExpConcrete(1., logits) self.assertEqual( con2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "should have rank"): con2.n_categories.eval(feed_dict={logits: 1.}) def test_init_temperature(self): with self.assertRaisesRegexp(ValueError, "should be a scalar"): ExpConcrete([1.], [1., 2.]) with self.test_session(use_gpu=True): temperature = tf.placeholder(tf.float32, None) con = ExpConcrete(temperature, [1., 2.]) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "should be a scalar"): con.temperature.eval(feed_dict={temperature: [1.]}) def test_value_shape(self): # static con = ExpConcrete(1., tf.placeholder(tf.float32, [None, 10])) self.assertEqual(con.get_value_shape().as_list(), [10]) # dynamic logits = tf.placeholder(tf.float32, None) con2 = ExpConcrete(1., logits) 
self.assertTrue(con2._value_shape().dtype is tf.int32) with self.test_session(use_gpu=True): self.assertEqual(con2._value_shape().eval( feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3]) self.assertEqual(con._value_shape().dtype, tf.int32) def test_batch_shape(self): def _proxy_distribution(logits): return ExpConcrete(1., logits) utils.test_batch_shape_1parameter( self, _proxy_distribution, np.zeros, is_univariate=False) def test_sample_shape(self): def _proxy_distribution(logits): return ExpConcrete(1., logits) utils.test_1parameter_sample_shape_one_rank_less( self, _proxy_distribution, np.zeros) def test_log_prob_shape(self): def _proxy_distribution(logits): return ExpConcrete(1., logits) def _make_samples(shape): samples = np.ones(shape, dtype=np.float32) return np.log(samples / samples.sum(axis=-1, keepdims=True)) utils.test_1parameter_log_prob_shape_one_rank_less( self, _proxy_distribution, np.ones, _make_samples) def test_value(self): with self.test_session(use_gpu=True): def _test_value(given, temperature, logits): given =
np.array(given, np.float32)
numpy.array
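For clarity (same values as the test above): the second positional argument of np.array is the dtype, so this casts the nested list to float32 in one step.

import numpy as np
given = np.array([[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]], np.float32)
print(given.dtype, given.shape)   # float32 (2, 3)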
import sys, os
import numpy as np
import matplotlib.pyplot as plt
from desitarget import cuts
import fitsio
import astropy.io.fits as fits
import healpy as hp
from scipy.special import erf
from astropy.table import Table

colorcuts_function = cuts.isELG_colors

# deep DECaLS imaging, with photozs from HSC
truthf = '/project/projectdirs/desi/users/ajross/MCdata/desi_mcsyst_truth.dr7.34ra38.-7dec-3.fits'
truth = fitsio.read(truthf, 1)
gmag = truth["g"]
w = gmag < 24.5
#truth = truth[w]
gmag = truth["g"]
rmag = truth["r"]
zmag = truth["z"]
photz = truth['hsc_mizuki_photoz_best']

#pixfn = '/project/projectdirs/desi/target/catalogs/dr8/0.31.1/pixweight/pixweight-dr8-0.31.1.fits'  #update this to be more recent
pixfn = '/global/cfs/cdirs/desi/target/catalogs/dr9m/0.42.0/pixweight/main/resolve/dark/pixweight-dark.fits'  #dr9m version


def mag2flux(mag):
    return 10**(-0.4*(mag-22.5))


def flux2mag(flux):
    mag = -2.5*np.log10(flux*(flux>0)+0.001*(flux<=0)) + 22.5
    mag[(flux<=0)] = 0.
    return mag


gflux = mag2flux(truth["g"])
rflux = mag2flux(truth["r"])
zflux = mag2flux(truth["z"])
w1flux =
np.zeros(gflux.shape)
numpy.zeros
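A minimal sketch (toy fluxes): np.zeros(gflux.shape) allocates a zero array matching gflux; np.zeros_like(gflux) is the equivalent shorthand and also copies the dtype.

import numpy as np
gflux = np.array([1.2, 0.7, 3.3])
w1flux = np.zeros(gflux.shape)   # -> array([0., 0., 0.])
assert (w1flux == np.zeros_like(gflux)).all()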
""" This is the configuration file for generating a synthetic dataset from scratch in the csalt architecture. It is imported as a Python modeule in various subroutines, and will be copied into the directory outputbase_dir/reduced_dir/basename/ for future reference once the data generation is complete. """ import numpy as np """ LOCATORS: These set the desired locations and naming conventions of the outputs, as well as the locations of necessary ancillary information. """ # base path outputbase_dir = 'storage/' # path to simobserve outputs and blank template .MS files template_dir = outputbase_dir+'obs_templates/' # path to storage space for "raw" synthetic dataset files synthraw_dir = outputbase_dir+'synth_storage/' # path to concatenated, "reduced" dataset files reduced_dir = outputbase_dir+'data/' # path to hard-copies of CASA logs casalogs_dir = outputbase_dir+'CASA_logs/' # path to CASA/simobserve-format antenna configuration files antcfg_dir = '/pool/asha0/casa-release-5.7.2-4.el7/data/alma/simmos/' # datafile naming base basename = 'simple-demo' # synthetic "raw" naming base in_MS = synthraw_dir+basename+'/'+basename # synthetic "reduced" naming base dataname = reduced_dir+basename+'/'+basename """ SIMULATED OBSERVATION SETTINGS: """ # array observing settings template = ['exo12m-lo', 'exo12m-hi'] # template names config = ['alma.cycle8.3', 'alma.cycle8.6'] # antenna location lists date = ['2022/04/20', '2022/07/11'] # observation dates (UTC) HA_0 = ['-0.25h', '0.0h'] # HAs at observing starts ttotal = ['2min', '5min'] # total on-source times tinteg = ['30s', '30s'] # integration times per stamp # spectral settings dnu_native = [122070.3125, 122070.3125] # native channel spacings (Hz) nu_rest = 345.7959899e9 # rest frequency (Hz) V_tune = [4.0e3, 4.0e3] # LSRK tunings at centers (m/s) V_span = [15.0e3, 15.0e3] # +/- ranges around V_tune (m/s) nover = 1 # over-sampling factor (for SRF) # spatial settings RA = '16:00:00.00' # phase center RA DEC = '-40:00:00.00' # phase center DEC # noise model settings RMS = [10.3, 5.4] # desired RMS (mJy/beam/chan) """ DATA REDUCTION SETTINGS: """ tavg = ['', ''] # time-averaging intervals V_bounds = [5.2e3-5e3, 5.2e3+5e3] # excised V_LSRK range (m/s) """ INPUT MODEL PARAMETERS: """ # parametric_model inputs incl = 40. PA = 130. mstar = 0.7 r_l = 200. z0 = 2.5 psi = 1. T0 = 115. q = -0.5 Tmaxb = 20. sigV0 = 261. ltau0 = np.log10(500.) ppp = -1. Vsys = 5.2e3 dx = 0. dy = 0. pars = np.array([incl, PA, mstar, r_l, z0, psi, T0, q, Tmaxb, sigV0, ltau0, ppp, Vsys, dx, dy]) # fixed inputs FOV = [6.375, 6.375] # full FOV (arcsec) Npix = [256, 256] # number of pixels per FOV # note: pixsize = FOV/(Npix-1) dist = 150. # distance (pc) cfg_dict = {} # passable dictionary of kwargs """ ADDITIONAL MISCELLANY: """ # process phase center into degrees RA_pieces = [np.float(RA.split(':')[i]) for i in np.arange(3)] RAdeg = 15 * np.sum(np.array(RA_pieces) / [1., 60., 3600.]) DEC_pieces = [np.float(DEC.split(':')[i]) for i in
np.arange(3)
numpy.arange
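# Hedged sketch of the sexagesimal-to-degrees conversion done at the end of
# the config above (plain Python + numpy; the explicit sign handling for
# negative declinations is an addition, since summing signed pieces is only
# safe when the arcmin/arcsec fields are zero):
import numpy as np

RA, DEC = '16:00:00.00', '-40:00:00.00'
RAdeg = 15 * np.sum(np.array([float(p) for p in RA.split(':')]) / [1., 60., 3600.])
sign = -1. if DEC.strip().startswith('-') else 1.
DECdeg = sign * np.sum(np.abs([float(p) for p in DEC.split(':')]) / [1., 60., 3600.])
assert (RAdeg, DECdeg) == (240.0, -40.0)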
# -*- coding: utf-8 -*- from pytta.classes._instanceinfo import RememberInstanceCreationInfo as RICI from pytta.classes.filter import fractional_octave_frequencies as FOF from pytta.classes import SignalObj, OctFilter, ImpulsiveResponse from pytta.utils import fractional_octave_frequencies as FOF, freq_to_band from math import isnan import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import time import locale from pytta import _h5utils as _h5 from pytta import _plot as plot import copy as cp # Analysis types and its units anTypes = {'RT': ('s', 'Reverberation time'), 'C': ('dB', 'Clarity'), 'D': ('%', 'Definition'), 'G': ('dB', 'Strength factor'), 'L': ('dB', 'Level'), 'mixed': ('-', 'Mixed')} class Analysis(RICI): """ Objects belonging to the Analysis class holds fractional octave band data. It does conveniently the operations linearly between Analyses of the type 'Level'. Therefore those operations do not occur with values in dB scale. Available Analysis' types below. For more information see each parameter/attribute/method specific documentation. Creation parameters (default), (type): -------------------------------------- * anType (), (string): Type of the Analysis. May be: - 'RT' for 'Reverberation time' Analysis in [s]; - 'C' for 'Clarity' in dB; - 'D' for 'Definition' in %; - 'G' for 'Strength factor' in dB; - 'L' for any 'Level' Analysis in dB (e.g: SPL); - 'mixed' for any combination between the types above. * nthOct, (int): The number of fractions per octave; * minBand, (int | float): The exact or approximated start frequency; * maxBand, (int | float): The exact or approximated stop frequency; * data, (list | numpy array): The data with the exact number of bands between the specified minimum (minBand) and maximum band (maxBand); * dataLabel (''), (string): Label for plots; * error, (list | numpy array): The error with the exact number of bands between the specified minimum (minBand) and maximum band (maxBand); * errorLabel (''), (string): Label for plots; * comment ('No comments.'), (string): Some comment about the object. * xLabel (None), (string): x axis plot label; * yLabel (None), (string): y axis plot label; * title (None), (string): plot title. Attributes: ----------- * bands (NumPy array): The bands central frequencies. Properties: ----------- * minBand, (int | float): When a new limit is set data is automatic adjusted. * maxBand, (int | float): When a new limit is set data is automatic adjusted. Methods: -------- * plot_bars(): Generates a bar plot. 
""" # Magic methods def __init__(self, anType, nthOct, minBand, maxBand, data, dataLabel=None, error=None, errorLabel='Error', comment='No comments.', xLabel=None, yLabel=None, title=None): super().__init__() self.anType = anType self.nthOct = nthOct self._minBand = minBand self._maxBand = maxBand self.data = data self.dataLabel = dataLabel self.error = error self.errorLabel = errorLabel self.comment = comment # Plot infos memory self.xLabel = xLabel self.yLabel = yLabel self.title = title return def __str__(self): return ('1/{} octave band {} '.format(self.nthOct, self.anType) + 'analysis from the {} [Hz] to the '.format(self.minBand) + '{} [Hz] band.'.format(self.maxBand)) def __repr__(self): return (f'{self.__class__.__name__}(' f'anType={self.anType!r}, ' f'nthOct={self.nthOct!r}, ' f'minBand={self.minBand!r}, ' f'maxBand={self.maxBand!r}, ' f'data={self.data!r}, ' f'comment={self.comment!r})') def __add__(self, other): if isinstance(other, Analysis): if other.range != self.range: raise ValueError("Can't subtract! Both Analysis have" + " different band limits.") if self.anType == 'L': if other.anType == 'L': data = [] for idx, value in enumerate(self.data): d = 10*np.log10(10**(value/10) + 10**(other.data[idx]/10)) data.append(d) anType = 'L' elif other.anType in ['mixed', 'C', 'D', 'RT']: data = self.data + other.data anType = 'mixed' else: raise NotImplementedError("Operation not implemented " + "for Analysis types " + anTypes[self.anType][1] + " and " + anTypes[other.anType][1] + ".") else: data = self.data + other.data anType = 'mixed' elif isinstance(other, (int, float)): if self.anType == 'L': data = [10*np.log10(10**(dt/10) + 10**(other/10)) for dt in self.data] anType = 'L' else: data = self.data + other anType = 'mixed' else: raise NotImplementedError("Operation not implemented between " + "Analysis and {}".format(type(other)) + "types.") selfDataLabel = self.dataLabel if self.dataLabel is not None \ else 'Analysis 1' if hasattr(other,'dataLabel'): if other.dataLabel is not None: otherDataLabel = other.dataLabel else: otherDataLabel = 'Analysis 2' else: otherDataLabel = 'Analysis 2' result = Analysis(anType=anType, nthOct=self.nthOct, minBand=self.minBand, maxBand=self.maxBand, data=data, dataLabel=selfDataLabel + ' + ' + otherDataLabel, error=None, errorLabel=None, comment=None, xLabel=self.xLabel, yLabel=self.yLabel, title=None) return result def __sub__(self, other): if isinstance(other, Analysis): if other.range != self.range: raise ValueError("Can't subtract! 
Both Analysis have" + " different band limits.") if self.anType == 'L': if other.anType == 'L': data = [] for idx, value in enumerate(self.data): d = 10*np.log10(10**(value/10) - 10**(other.data[idx]/10)) data.append(d) anType = 'L' elif other.anType in ['mixed', 'C', 'D', 'RT']: data = self.data - other.data anType = 'mixed' else: raise NotImplementedError("Operation not implemented " + "for Analysis types " + anTypes[self.anType][1] + " and " + anTypes[other.anType][1] + ".") else: data = self.data - other.data anType = 'mixed' elif isinstance(other, (int, float)): if self.anType == 'L': data = [10*np.log10(10**(dt/10) - 10**(other/10)) for dt in self.data] anType = 'L' else: data = self.data - other anType = 'mixed' else: raise NotImplementedError("Operation not implemented between " + "Analysis and {}".format(type(other)) + "types.") selfDataLabel = self.dataLabel if self.dataLabel is not None \ else 'Analysis 1' if hasattr(other,'dataLabel'): if other.dataLabel is not None: otherDataLabel = other.dataLabel else: otherDataLabel = 'Analysis 2' else: otherDataLabel = 'Analysis 2' result = Analysis(anType=anType, nthOct=self.nthOct, minBand=self.minBand, maxBand=self.maxBand, data=data, dataLabel=selfDataLabel + ' - ' + otherDataLabel, error=None, errorLabel=None, comment=None, xLabel=self.xLabel, yLabel=self.yLabel, title=None) return result def __mul__(self, other): if isinstance(other, Analysis): if other.range != self.range: raise ValueError("Can't multiply! Both Analysis have " + "different band limits.") anType='mixed' data=self.data*other.data elif isinstance(other, (int, float)): anType='mixed' data=self.data*other else: raise TypeError("Analysis can only be operated with int, float, " + "or Analysis types.") selfDataLabel = self.dataLabel if self.dataLabel is not None \ else 'Analysis 1' if hasattr(other,'dataLabel'): if other.dataLabel is not None: otherDataLabel = other.dataLabel else: otherDataLabel = 'Analysis 2' else: otherDataLabel = 'Analysis 2' result = Analysis(anType=anType, nthOct=self.nthOct, minBand=self.minBand, maxBand=self.maxBand, data=data, dataLabel=selfDataLabel + ' * ' + otherDataLabel, error=None, errorLabel=None, comment=None, xLabel=self.xLabel, yLabel=self.yLabel, title=None) return result def __rtruediv__(self, other): if isinstance(other, Analysis): if self.anType == 'L': if other.range != self.range: raise ValueError("Can't divide! 
Both Analysis have" + " different band limits.") elif other.anType in ['mixed', 'C', 'D', 'RT']: data = other.data / self.data anType = 'mixed' else: raise NotImplementedError("Operation not implemented " + "for Analysis types " + anTypes[self.anType][1] + " and " + anTypes[other.anType][1] + ".") else: data = other.data / self.data anType = 'mixed' elif isinstance(other, (int, float)): if self.anType == 'L': data = [10*np.log10(10**(dt/10) / other) for dt in self.data] anType = 'L' else: data = other / self.data anType = 'mixed' else: raise NotImplementedError("Operation not implemented between " + "Analysis and {}".format(type(other)) + "types.") selfDataLabel = self.dataLabel if self.dataLabel is not None \ else 'Analysis 1' if hasattr(other,'dataLabel'): if other.dataLabel is not None: otherDataLabel = other.dataLabel else: otherDataLabel = 'Analysis 2' else: otherDataLabel = 'Analysis 2' result = Analysis(anType=anType, nthOct=self.nthOct, minBand=self.minBand, maxBand=self.maxBand, data=data, dataLabel=selfDataLabel + ' / ' + otherDataLabel, error=None, errorLabel=None, comment=None, xLabel=self.xLabel, yLabel=self.yLabel, title=None) return result def __truediv__(self, other): if isinstance(other, Analysis): if self.anType == 'L': if other.range != self.range: raise ValueError("Can't divide! Both Analysis have" + " different band limits.") elif other.anType in ['mixed', 'C', 'D', 'RT']: data = self.data / other.data anType = 'mixed' else: raise NotImplementedError("Operation not implemented " + "for Analysis types " + anTypes[self.anType][1] + " and " + anTypes[other.anType][1] + ".") else: data = self.data / other.data anType = 'mixed' elif isinstance(other, (int, float)): if self.anType == 'L': data = [10*np.log10(10**(dt/10) / other) for dt in self.data] anType = 'L' else: data = self.data / other anType = 'mixed' else: raise NotImplementedError("Operation not implemented between " + "Analysis and {}".format(type(other)) + "types.") selfDataLabel = self.dataLabel if self.dataLabel is not None \ else 'Analysis 1' if hasattr(other,'dataLabel'): if other.dataLabel is not None: otherDataLabel = other.dataLabel else: otherDataLabel = 'Analysis 2' else: otherDataLabel = 'Analysis 2' result = Analysis(anType=anType, nthOct=self.nthOct, minBand=self.minBand, maxBand=self.maxBand, data=data, dataLabel=selfDataLabel + ' / ' + otherDataLabel, error=None, errorLabel=None, comment=None, xLabel=self.xLabel, yLabel=self.yLabel, title=None) return result # Properties @property def anType(self): """Type of the Analysis. May be: - 'RT' for 'Reverberation time' Analysis in [s]; - 'C' for 'Clarity' in dB; - 'D' for 'Definition' in %; - 'G' for 'Strength factor' in dB; - 'L' for any 'Level' Analysis in dB (e.g: SPL); - 'mixed' for any combination between the types above. Return: ------- string. """ return self._anType @anType.setter def anType(self, newType): if type(newType) is not str: raise TypeError("anType parameter makes reference to the " + "calculated parameter, e.g. 'RT' for " + "reverberation time, and must be a str value.") elif newType not in anTypes: raise ValueError(newType + " type not supported. May be 'RT, " + "'C', 'D', 'G', 'L', or 'mixed'.") self.unit = anTypes[newType][0] self.anName = anTypes[newType][1] self._anType = newType return @property def nthOct(self): """octave band fraction. Could be 1, 3, 6... Return: ------- int. 
""" return self._nthOct @nthOct.setter def nthOct(self, new): if not isinstance(new, int): raise TypeError("Number of bands per octave must be int") if '_nthOct' in locals(): if self.nthOct > new: raise TypeError("It's impossible to convert from " + "{} to {} bands".format(self.nthOct, new) + "per octave") else: raise NotImplementedError('Conversion between different ' + 'nthOct not implemented yet.') else: self._nthOct = new return @property def minBand(self): """minimum octave fraction band. When a new limit is set data is automatic adjusted. Return: ------- float. """ return self._minBand @minBand.setter def minBand(self, new): if type(new) is not int and type(new) is not float: raise TypeError("Frequency range values must \ be either int or float.") if new in self.bands: print("ATTENTION! Deleting data below " + str(new) + " [Hz].") self._minBand = new self.data = self.data[int(np.where(self.bands==new)[-1]):] else: adjNew = self.bands[int(np.where(self.bands<=new)[-1])] print("'" + str(new) + "' is not a valid band. " + "Taking the closest band: " + str(adjNew) + " [Hz].") self._minBand = adjNew self.data = self.data[int(np.where(self.bands==adjNew)[-1]):] return @property def maxBand(self): """maximum octave fraction band. When a new limit is set data is automatic adjusted. Return: ------- float. """ return self._maxBand @maxBand.setter def maxBand(self, new): if type(new) is not int and type(new) is not float: raise TypeError("Frequency range values must \ be either int or float.") if new in self.bands: print("ATTENTION! Deleting data above " + str(new) + " [Hz].") self._maxBand = new self.data = self.data[:int(np.where(self.bands==new)[-1])+1] else: adjNew = self.bands[int(np.where(self.bands<=new)[-1])] print("'" + str(new) + "' is not a valid band. " + "Taking the closest band: " + str(adjNew) + " [Hz].") self._maxBand = adjNew self.data = self.data[:int(np.where(self.bands==adjNew)[-1])+1] return # self._bandMax = new # return @property def range(self): return (self.minBand, self.maxBand) @property def data(self): """Fractional octave bands data. data must be a list or NumPy ndarray with the same number of elements than bands between the specified minimum (minBand) and maximum band (maxBand). Return: ------- NumPy ndarray. """ return self._data @data.setter def data(self, newData): bands = FOF(nthOct=self.nthOct, freqRange=(self.minBand, self.maxBand))[:,1] self._minBand = float(bands[0]) self._maxBand = float(bands[-1]) if not isinstance(newData, list) and \ not isinstance(newData, np.ndarray): raise TypeError("'data' must be provided as a list or " + "numpy ndarray.") elif len(newData) != len(bands): raise ValueError("Provided 'data' has different number of bands " + "then the existent bands between " + "{} and {} [Hz].".format(self.minBand, self.maxBand)) # ... self._data = np.array(newData) self._bands = bands return @property def error(self): """error per octave fraction band. The error must be a list or NumPy ndarray with same number of elements as bands between the specified minimum (minBand) and maximum bands (maxBand); Shown as +-error. Return: ------- NumPy ndarray. 
""" return self._error @error.setter def error(self, newError): if not isinstance(newError, np.ndarray) and \ not isinstance(newError, list) and \ newError is not None: raise TypeError("'error' must be provided as a list, numpy " + "ndarray or None.") if newError is not None: if len(newError) != len(self.data): raise ValueError("'error' must have the same length as 'data'.") self._error = np.array(newError) else: self._error = newError return @property def dataLabel(self): """Label of the data. Used for plot purposes. Return: ------- str. """ return self._dataLabel @dataLabel.setter def dataLabel(self, newLabel): if newLabel is not None and not isinstance(newLabel, str): raise TypeError("'dataLabel' must be a string or None.") self._dataLabel = newLabel return @property def errorLabel(self): """Label of the error information. Used for plot purposes. Return: ------- str. """ return self._errorLabel @errorLabel.setter def errorLabel(self, newLabel): if newLabel is not None and not isinstance(newLabel, str): raise TypeError("'errorLabel' must be a string or None.") self._errorLabel = newLabel return @property def bands(self): """The octave fraction bands central frequencies. Return: ------- list with the fractional octave bands of this Analysis. """ return self._bands # Methods def _h5_save(self, h5group): """ Saves itself inside a hdf5 group from an already opened file via pytta.save(...). """ h5group.attrs['class'] = 'Analysis' h5group.attrs['anType'] = self.anType h5group.attrs['nthOct'] = self.nthOct h5group.attrs['minBand'] = self.minBand h5group.attrs['maxBand'] = self.maxBand h5group.attrs['dataLabel'] = _h5.attr_parser(self.dataLabel) h5group.attrs['errorLabel'] = _h5.attr_parser(self.errorLabel) h5group.attrs['comment'] = _h5.attr_parser(self.comment) h5group.attrs['xLabel'] = _h5.attr_parser(self.xLabel) h5group.attrs['yLabel'] = _h5.attr_parser(self.yLabel) h5group.attrs['title'] = _h5.attr_parser(self.title) h5group['data'] = self.data if self.error is not None: h5group['error'] = self.error return def plot(self, **kwargs): """Plot the analysis data in fractinal octave bands. Parameters (default), (type): ----------------------------- * dataLabel ('Analysis type [unit]'), (str): legend label for the current data * errorLabel ('Error'), (str): legend label for the current data error * xLabel ('Time [s]'), (str): x axis label. * yLabel ('Amplitude'), (str): y axis label. * yLim (), (list): inferior and superior limits. >>> yLim = [-100, 100] * title (), (str): plot title * decimalSep (','), (str): may be dot or comma. >>> decimalSep = ',' # in Brazil * barWidth (0.75), float: width of the bars from one fractional octave band. 0 < barWidth < 1. * errorStyle ('standard'), str: error curve style. May be 'laza' or None/'standard'. * forceZeroCentering ('False'), bool: force centered bars at Y zero. Return: -------- matplotlib.figure.Figure object. """ return self.plot_bars(**kwargs) def plot_bars(self, dataLabel:str=None, errorLabel:str=None, xLabel:str=None, yLabel:str=None, yLim:list=None, xLim:list=None, title:str=None, decimalSep:str=',', barWidth:float=0.75, errorStyle:str=None, forceZeroCentering:bool=False, overlapBars:bool=False, color:list=None): """Plot the analysis data in fractinal octave bands. Parameters (default), (type): ----------------------------- * dataLabel ('Analysis type [unit]'), (str): legend label for the current data * errorLabel ('Error'), (str): legend label for the current data error * xLabel ('Time [s]'), (str): x axis label. 
* yLabel ('Amplitude'), (str): y axis label. * yLim (), (list): inferior and superior limits. >>> yLim = [-100, 100] * xLim (), (list): bands limits. >>> xLim = [100, 10000] * title (), (str): plot title * decimalSep (','), (str): may be dot or comma. >>> decimalSep = ',' # in Brazil * barWidth (0.75), float: width of the bars from one fractional octave band. 0 < barWidth < 1. * errorStyle ('standard'), str: error curve style. May be 'laza' or None/'standard'. * forceZeroCentering ('False'), bool: force centered bars at Y zero. * overlapBars ('False'), bool: overlap bars. No side by side bars of different data. * color (None), list: list containing the color of each Analysis. Return: -------- matplotlib.figure.Figure object. """ if dataLabel is not None: self.dataLabel = dataLabel if errorLabel is not None: self.errorLabel = errorLabel if xLabel is not None: self.barsXLabel = xLabel else: if hasattr(self, 'barsXLabel'): if self.barsXLabel is not None: xLabel = self.barsXLabel if yLabel is not None: self.barsYLabel = yLabel else: if hasattr(self, 'barsYLabel'): if self.barsYLabel is not None: yLabel = self.barsYLabel if title is not None: self.barsTitle = title else: if hasattr(self, 'barsTitle'): if self.barsTitle is not None: title = self.barsTitle fig = plot.bars((self,), xLabel, yLabel, yLim, xLim, self.title, decimalSep, barWidth, errorStyle, forceZeroCentering, overlapBars, color) return fig class RoomAnalysis(Analysis): """Room monoaural acoustical parameters for quality analysis. Provides interface to estimate several room parameters based on the energy distribution of the impulse response. Calculations compliant to ISO 3382-1 to obtain room acoustic parameters. It has an implementation of Lundeby et al. [1] algorithm to estimate the correction factor for the cumulative integral, as suggested by the ISO 3382-1. This class receives an one channel SignalObj or ImpulsiveResponse and calculate all the room acoustic parameters. Available room parameters: D50, C80, Ts, STearly, STlate, EDT, T20, T30. Creation parameters (default), (type): ------------ * signalArray (ndarray | list), (NumPy array): signal at specified domain. For 'freq' domain only half of the spectra must be provided. The total numSamples should also be provided. * ir (), (SignalObj): Monaural room impulse response signal. * nthOct (1), (int): Number of bands per octave. The default is 1. * minFreq (20), (float): Central frequency of the first band. The default is 2e1. * maxFreq (20000) (float): Central frequency of the last band. The default is 2e4. * *args : () (Tuple): See Analysis class. * bypassLundeby (false), (bool): Bypass Lundeby calculation, or not. The default is False. * suppressWarnings (false), (bool): Supress Lundeby warnings. The default is True. * ircut (None), (float): Cut the IR and throw away the silence tail. The default is None. * **kwargs (), (Dict): See Analysis. Attributes (default), (data type): ----------------------------------- * parameters (), (Tuple): List of parameters names. return tuple(self._params.keys()) * rms (), (np.ndarray): Effective IR amplitude by frequency `band`. * SPL (), (np.ndarray): Equivalent IR level by frequency `band`. * D50 (), (np.ndarray): Room Definition by frequency `band`. * C80 (), (np.ndarray): Room Clarity by frequency `band`. * Ts (), (np.ndarray): Central Time by frequency `band`. * STearly (), (np.ndarray): Early energy distribution by frequency `band`. * STlate (), (np.ndarray): Late energy distribution by frequency `band`. 
* EDT (), (np.ndarray): Early Decay Time by frequency `band`. * T20 (), (np.ndarray): Reverberation time with 20 dB decay, by frequency `band`. * T30 (), (np.ndarray): Reverberation time with 30 dB decay, by frequency `band`. Methods: --------- * plot_param(name [str], **kwargs): Plot a chart with the parameter passed in as `name`. * plot_rms(label [str], **kwargs): Plot a chart for the impulse response's `rms` by frequency `bands`. * plot_SPL(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `SPL` by frequency `bands`. * plot_C80(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `C80` by frequency `bands`. * plot_D50(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `D50` by frequency `bands`. * plot_T20(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `T20` by frequency `bands`. * plot_T30(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `T30` by frequency `bands`. * plot_Ts(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `Ts` by frequency `bands`. * plot_EDT(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `EDT` by frequency `bands`. * plot_STearly(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `STearly` by frequency `bands`. * plot_STlate(label [str], yaxis [str], **kwargs): Plot a chart for the impulse response's `STlate` by frequency `bands`. For further information on methods see its specific documentation. Authors: <NAME>, <EMAIL> <NAME>, <EMAIL> <NAME>, <EMAIL>""" def __init__(self, ir: SignalObj, nthOct: int = 1, minFreq: float = 2e1, maxFreq: float = 2e4, *args, plotLundeby: bool = False, bypassLundeby: bool = False, suppressWarnings: bool = True, ircut: float = None, **kwargs): _ir = ir.IR if type(ir) == ImpulsiveResponse else ir minBand = freq_to_band(minFreq, nthOct, 1000, 10) maxBand = freq_to_band(maxFreq, nthOct, 1000, 10) nbands = maxBand - minBand + 1 super().__init__('mixed', nthOct, minFreq, maxFreq, nbands*[0], *args, **kwargs) self.ir = crop_IR(_ir, ircut) self._params = self.estimate_energy_parameters(self.ir, self.bands, plotLundeby, bypassLundeby, suppressWarnings, nthOct=nthOct, minFreq=minFreq, maxFreq=maxFreq) return @staticmethod def estimate_energy_parameters(ir: SignalObj, bands: np.ndarray, plotLundeby: bool = False, bypassLundeby: bool = False, suppressWarnings: bool = False, **kwargs): """ Estimate the Impulse Response energy parameters. Parameters ---------- bypassLundeby : bool Whether to bypass calculation of Lundeby IR improvements or not. The default is False. suppressWarnings : bool If supress warnings about IR quality and the bypassing of Lundeby calculations. The default is False. Returns ------- params : Dict[str, np.ndarray] A dict with parameters by name. 
""" listEDC, fhSignal = cumulative_integration(ir, bypassLundeby, plotLundeby, suppressWarnings, **kwargs) params = {} params['rms'] = fhSignal.rms() params['SPL'] = fhSignal.spl() params['Ts'] = central_time(fhSignal.timeSignal**2, fhSignal.timeVector) params['D50'] = definition(listEDC, ir.samplingRate) params['C80'] = clarity(listEDC, ir.samplingRate) params['STearly'] = st_early(listEDC, ir.samplingRate) params['STlate'] = st_late(listEDC, ir.samplingRate) params['EDT'] = reverberation_time('EDT', listEDC) params['T20'] = reverberation_time(20, listEDC) params['T30'] = reverberation_time(30, listEDC) # self._params['BR'], self._params['TR'] = timbre_ratios(self.T20) return params @property def parameters(self): """List of parameters names.""" return tuple(self._params.keys()) @property def rms(self): """Effective IR amplitude by frequency `band`.""" return self._params['rms'] @property def SPL(self): """Equivalent IR level by frequency `band`.""" return self._params['SPL'] @property def D50(self): """Room Definition by frequency `band`.""" return self._params['D50'] @property def C80(self): """Effective IR amplitude, by frequency `band`.""" return self._params['C80'] @property def Ts(self): """Central Time by frequency `band`.""" return self._params['Ts'] @property def STearly(self): """Early energy distribution by frequency `band`.""" return self._params['STearly'] @property def STlate(self): """Late energy distribution by frequency `band`.""" return self._params['STlate'] @property def EDT(self): """Early Decay Time by frequency `band`.""" return self._params['EDT'] @property def T20(self): """Reverberation time with 20 dB decay, by frequency `band`.""" return self._params['T20'] @property def T30(self): """Reverberation time with 30 dB decay, by frequency `band`.""" return self._params['T30'] # @property # def BR(self): # """Reverberation time with 30 dB decay, by frequency `band`.""" # return self._params['BR'] # @property # def TR(self): # """Reverberation time with 30 dB decay, by frequency `band`.""" # return self._params['TR'] def plot_param(self, name: str, **kwargs): """ Plot a chart with the parameter passed in as `name`. Parameters ---------- name : str Room parameter name, e.g. `'T20' | 'C80' | 'SPL'`, etc. kwargs: Dict All kwargs accepted by `Analysis.plot_bar`. Returns ------- f : matplotlib.Figure The figure of the plot chart. 
""" self._data = self._params[name] f = self.plot(**kwargs) self._data = np.zeros(self.bands.shape) return f def plot_rms(self, label='RMS', **kwargs): """Plot a chart for the impulse response's `rms` by frequency `bands`.""" return self.plot_param('rms', dataLabel=label, **kwargs) def plot_SPL(self, label='SPL', yaxis='Level [dB]', **kwargs): """Plot a chart for the impulse response's `SPL` by frequency `bands`.""" return self.plot_param('SPL', dataLabel=label, yLabel=yaxis, **kwargs) def plot_C80(self, label='C80', yaxis='Clarity [dB]', **kwargs): """Plot a chart for the impulse response's `C80` by frequency `bands`.""" return self.plot_param('C80', dataLabel=label, yLabel=yaxis, **kwargs) def plot_D50(self, label='D50', yaxis='Definition [%]', **kwargs): """Plot a chart for the impulse response's `D50` by frequency `bands`.""" return self.plot_param('D50', dataLabel=label, yLabel=yaxis, **kwargs) def plot_T20(self, label='T20', yaxis='Reverberation time [s]', **kwargs): """Plot a chart for the impulse response's `T20` by frequency `bands`.""" return self.plot_param('T20', dataLabel=label, yLabel=yaxis, **kwargs) def plot_T30(self, label='T30', yaxis='Reverberation time [s]', **kwargs): """Plot a chart for the impulse response's `T30` by frequency `bands`.""" return self.plot_param('T30', dataLabel=label, yLabel=yaxis, **kwargs) def plot_Ts(self, label='Ts', yaxis='Central time [s]', **kwargs): """Plot a chart for the impulse response's `Ts` by frequency `bands`.""" return self.plot_param('Ts', dataLabel=label, yLabel=yaxis, **kwargs) def plot_EDT(self, label='EDT', yaxis='Early Decay Time [s]', **kwargs): """Plot a chart for the impulse response's `EDT` by frequency `bands`.""" return self.plot_param('EDT', dataLabel=label, yLabel=yaxis, **kwargs) def plot_STearly(self, label='STearly', yaxis='Early reflection level [dB]', **kwargs): """Plot a chart for the impulse response's `STearly` by frequency `bands`.""" return self.plot_param('STearly', dataLabel=label, yLabel=yaxis, **kwargs) def plot_STlate(self, label='STlate', yaxis='Late reflection level [dB]', **kwargs): """Plot a chart for the impulse response's `STlate` by frequency `bands`.""" return self.plot_param('STlate', dataLabel=label, yLabel=yaxis, **kwargs) # def plot_BR(self): # """Plot a chart for the impulse response's `BR` by frequency `bands`.""" # return self.plot_param('BR') # def plot_TR(self): # """Plot a chart for the impulse response's `TR` by frequency `bands`.""" # return self.plot_param('TR') def _filter(signal, order: int = 4, nthOct: int = 3, minFreq: float = 20, maxFreq: float = 20000, refFreq: float = 1000, base: int = 10): of = OctFilter(order=order, nthOct=nthOct, samplingRate=signal.samplingRate, minFreq=minFreq, maxFreq=maxFreq, refFreq=refFreq, base=base) result = of.filter(signal) return result[0] # @njit def _level_profile(timeSignal, samplingRate, numSamples, numChannels, blockSamples=None): """Get h(t) in octave bands and do the local time averaging in nblocks. 
Returns h^2_averaged(block).""" def mean_squared(x): return np.mean(x**2) if blockSamples is None: blockSamples = 100 nblocks = int(numSamples // blockSamples) profile = np.zeros((nblocks, numChannels), dtype=np.float32) timeStamp = np.zeros((nblocks, 1)) for ch in range(numChannels): # if numChannels == 1: # tmp = timeSignal # else: tmp = timeSignal[:, ch] for idx in range(nblocks): profile[idx, ch] = mean_squared(tmp[:blockSamples]) timeStamp[idx, 0] = idx*blockSamples/samplingRate tmp = tmp[blockSamples:] return profile, timeStamp # @njit def _start_sample_ISO3382(timeSignal, threshold) -> np.ndarray: squaredIR = timeSignal**2 # assume the last 10% of the IR is noise, and calculate its noise level last10Idx = -int(len(squaredIR)//10) noiseLevel = np.mean(squaredIR[last10Idx:]) # get the maximum of the signal, that is the assumed IR peak max_val = np.max(squaredIR) max_idx = np.argmax(squaredIR) # check if the SNR is enough to assume that the signal is an IR. If not, # the signal is probably not an IR, so it starts at sample 1 idxNoShift = np.asarray([max_val < 100*noiseLevel or max_idx > int(0.9*squaredIR.shape[0])]) # less than 20dB SNR or in the "noisy" part if idxNoShift.any(): print("noiseLevelCheck: The SNR too bad or this is not an " + "impulse response.") return 0 # find the first sample that lies under the given threshold threshold = abs(threshold) startSample = 1 # # TODO - envelope mar/pdi - check! # if idxNoShift: # print("Something wrong!") # return # if maximum lies on the first point, then there is no point in searching # for the beginning of the IR. Just return this position. if max_idx > 0: abs_dat = 10*np.log10(squaredIR[:max_idx]) \ - 10.*np.log10(max_val) thresholdNotOk = True thresholdShift = 0 while thresholdNotOk: if len(np.where(abs_dat < (-threshold+thresholdShift))[0]) > 0: lastBelowThreshold = \ np.where(abs_dat < (-threshold+thresholdShift))[0][-1] thresholdNotOk = False else: thresholdShift += 1 if thresholdShift > 0: print("_start_sample_ISO3382: 20 dB threshold too high. " + "Decreasing it.") if lastBelowThreshold > 0: startSample = lastBelowThreshold else: startSample = 1 return startSample # @njit def _circular_time_shift(timeSignal, threshold=20): # find the first sample where inputSignal level > 20 dB or > bgNoise level startSample = _start_sample_ISO3382(timeSignal, threshold) newTimeSignal = timeSignal[startSample:] return (newTimeSignal, startSample) # @njit def _Lundeby_correction(band, timeSignal, samplingRate, numSamples, numChannels, timeLength, suppressWarnings=True): returnTuple = (np.float32(0), np.float32(0), np.int32(0), np.float32(0)) timeSignal, sampleShift = _circular_time_shift(timeSignal) if sampleShift is None: return returnTuple numSamples -= sampleShift # discount shifted samples numParts = 5 # number of parts per 10 dB decay. 
    dBtoNoise = 7  # stop point: 7 dB above first estimated background noise
    useDynRange = 15  # dynamic range

    # Window length - 10 to 50 ms, longer periods for lower frequencies
    # and vice versa
    repeat = True
    i = 0
    winTimeLength = 0.01
    while repeat:  # loop to find proper winTimeLength
        winTimeLength = winTimeLength + 0.01*i
        # 1) local time average:
        blockSamples = int(winTimeLength * samplingRate)
        timeWinData, timeVecWin = _level_profile(timeSignal, samplingRate,
                                                 numSamples, numChannels,
                                                 blockSamples)
        # 2) estimate noise from h^2_averaged(block):
        bgNoiseLevel = 10 * \
            np.log10(np.mean(timeWinData[-int(timeWinData.size/10):]))
        # 3) Calculate preliminary slope
        startIdx = np.argmax(np.abs(timeWinData/np.max(np.abs(timeWinData))))
        stopIdx = startIdx + np.where(10*np.log10(timeWinData[startIdx+1:])
                                      >= bgNoiseLevel + dBtoNoise)[0][-1]
        dynRange = 10*np.log10(timeWinData[stopIdx]) \
            - 10*np.log10(timeWinData[startIdx])
        if (stopIdx == startIdx) or (dynRange > -5)[0]:
            if not suppressWarnings:
                print(band, "[Hz] band: SNR too low for the preliminary",
                      "slope calculation.")
            # return returnTuple
        # X*c = EDC (energy decaying curve)
        X = np.ones((stopIdx-startIdx, 2), dtype=np.float32)
        X[:, 1] = timeVecWin[startIdx:stopIdx, 0]
        c = np.linalg.lstsq(X, 10*np.log10(timeWinData[startIdx:stopIdx]),
                            rcond=-1)[0]
        if (c[1] == 0)[0] or np.isnan(c).any():
            if not suppressWarnings:
                print(band, "[Hz] band: regression failed. T would be inf.")
            # return returnTuple
        # 4) preliminary intersection
        crossingPoint = (bgNoiseLevel - c[0]) / c[1]  # [s]
        if (crossingPoint > 2*(timeLength + sampleShift/samplingRate))[0]:
            if not suppressWarnings:
                print(band, "[Hz] band: preliminary intersection point",
                      "between bgNoiseLevel and the decay slope greater",
                      "than signal length.")
            # return returnTuple
        # 5) new local time interval length
        nBlocksInDecay = numParts * dynRange[0] / -10
        dynRangeTime = timeVecWin[stopIdx] - timeVecWin[startIdx]
        blockSamples = int(samplingRate * dynRangeTime[0] / nBlocksInDecay)
        # 6) average
        timeWinData, timeVecWin = _level_profile(timeSignal, samplingRate,
                                                 numSamples, numChannels,
                                                 blockSamples)
        oldCrossingPoint = 11 + crossingPoint  # arbitrary higher value to enter loop
        loopCounter = 0
        while (np.abs(oldCrossingPoint - crossingPoint) > 0.001)[0]:
            # 7) estimate background noise level (BGL)
            bgNoiseMargin = 7
            idxLast10Percent = int(len(timeWinData)-(len(timeWinData)//10))
            bgStartTime = crossingPoint - bgNoiseMargin/c[1]
            if (bgStartTime > timeVecWin[-1:][0])[0]:
                idx10dBDecayBelowCrossPoint = len(timeVecWin)-1
            else:
                idx10dBDecayBelowCrossPoint = \
                    np.where(timeVecWin >= bgStartTime)[0][0]
            BGL = np.mean(timeWinData[np.min(
                np.array([idxLast10Percent, idx10dBDecayBelowCrossPoint])):])
            bgNoiseLevel = 10*np.log10(BGL)
            # 8) estimate late decay slope
            stopTime = (bgNoiseLevel + dBtoNoise - c[0])/c[1]
            if (stopTime > timeVecWin[-1])[0]:
                stopIdx = 0
            else:
                stopIdx = int(np.where(timeVecWin >= stopTime)[0][0])
            startTime = (bgNoiseLevel + dBtoNoise + useDynRange - c[0])/c[1]
            if (startTime < timeVecWin[0])[0]:
                startIdx = 0
            else:
                startIdx = int(np.where(timeVecWin <= startTime)[0][0])
            lateDynRange = np.abs(10*np.log10(timeWinData[stopIdx])
                                  - 10*np.log10(timeWinData[startIdx]))
            # where returns empty
            if stopIdx == startIdx or (lateDynRange < useDynRange)[0]:
                if not suppressWarnings:
                    print(band, "[Hz] band: SNR for the Lundeby late decay",
                          "slope too low. Skipping!")
                # c[1] = np.inf
                c[1] = 0
                i += 1
                break
            X = np.ones((stopIdx-startIdx, 2), dtype=np.float32)
            X[:, 1] = timeVecWin[startIdx:stopIdx, 0]
            c = np.linalg.lstsq(X, 10*np.log10(timeWinData[startIdx:stopIdx]),
                                rcond=-1)[0]
            if (c[1] >= 0)[0]:
                if not suppressWarnings:
                    print(band, "[Hz] band: regression did not work,",
                          "T -> inf. Setting slope to 0!")
                # c[1] = np.inf
                c[1] = 0
                i += 1
                break
            # 9) find crosspoint
            oldCrossingPoint = crossingPoint
            crossingPoint = (bgNoiseLevel - c[0]) / c[1]
            loopCounter += 1
            if loopCounter > 30:
                if not suppressWarnings:
                    print(band, "[Hz] band: more than 30 iterations on",
                          "regression. Canceling!")
                break
        interIdx = crossingPoint * samplingRate  # [sample]
        i += 1
        if c[1][0] != 0:
            repeat = False
        if i > 5:
            if not suppressWarnings:
                print(band, "[Hz] band: too many iterations to find",
                      "winTimeLength. Canceling!")
            return returnTuple

    return c[0][0], c[1][0], np.int32(interIdx[0]), BGL


# @njit
def energy_decay_calculation(band, timeSignal, timeVector, samplingRate,
                             numSamples, numChannels, timeLength,
                             bypassLundeby, suppressWarnings=True):
    """Calculate the Energy Decay Curve."""
    if bypassLundeby is False:
        lundebyParams = \
            _Lundeby_correction(band, timeSignal, samplingRate, numSamples,
                                numChannels, timeLength,
                                suppressWarnings=suppressWarnings)
        _, c1, interIdx, BGL = lundebyParams
        lateRT = -60/c1 if c1 != 0 else 0
    else:
        interIdx = 0
        lateRT = 1
    if interIdx == 0:
        interIdx = -1
    truncatedTimeSignal = timeSignal[:interIdx, 0]
    truncatedTimeVector = timeVector[:interIdx]
    if lateRT != 0.0:
        if not bypassLundeby:
            C = samplingRate*BGL*lateRT/(6*np.log(10))
        else:
            C = 0
        sqrInv = truncatedTimeSignal[::-1]**2
        energyDecayFull = np.cumsum(sqrInv)[::-1] + C
        energyDecay = energyDecayFull/energyDecayFull[0]
    else:
        if not suppressWarnings:
            print(band, "[Hz] band: could not estimate C factor")
        C = 0
        energyDecay = np.zeros(truncatedTimeVector.size)
    return (energyDecay, truncatedTimeVector, lundebyParams)


def cumulative_integration(inputSignal, bypassLundeby, plotLundebyResults,
                           suppressWarnings=True, **kwargs):
    """Cumulative integration with proper corrections."""
    def plot_lundeby():
        c0, c1, interIdx, BGL = lundebyParams
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_axes([0.08, 0.15, 0.75, 0.8], polar=False,
                          projection='rectilinear', xscale='linear')
        line = c1*inputSignal.timeVector + c0
        ax.plot(inputSignal.timeVector,
                10*np.log10(inputSignal.timeSignal**2), label='IR')
        ax.axhline(y=10*np.log10(BGL), color='#1f77b4', label='BG Noise',
                   c='red')
        ax.plot(inputSignal.timeVector, line, label='Late slope', c='black')
        ax.axvline(x=interIdx/inputSignal.samplingRate,
                   label='Truncation point', c='green')
        ax.grid()
        ax.set_xlabel('Time [s]')
        ax.set_ylabel('Amplitude [dBFS]')
        plt.title('{0:.0f} [Hz]'.format(band))
        ax.legend(loc='best', shadow=True, fontsize='x-large')
    # timeSignal = inputSignal.timeSignal[:]
    # Substituted by SignalObj.crop in analyse function
    # timeSignal, sampleShift = _circular_time_shift(timeSignal)
    # del sampleShift
    # hSignal = SignalObj(timeSignal, inputSignal.lengthDomain,
    #                     inputSignal.samplingRate)
    hSignal = _filter(inputSignal, **kwargs)
    bands = FOF(nthOct=kwargs['nthOct'],
                freqRange=[kwargs['minFreq'], kwargs['maxFreq']])[:, 1]
    listEDC = []
    for ch in range(hSignal.numChannels):
        signal = hSignal[ch]
        band = bands[ch]
        energyDecay, energyVector, lundebyParams = \
            energy_decay_calculation(band, signal.timeSignal,
                                     signal.timeVector, signal.samplingRate,
                                     signal.numSamples, signal.numChannels,
                                     signal.timeLength, bypassLundeby,
                                     suppressWarnings=suppressWarnings)
        listEDC.append((energyDecay,
energyVector)) if plotLundebyResults: # Placed here because Numba can't handle plots. # plot_lundeby(band, timeVector, timeSignal, samplingRate, # lundebyParams) plot_lundeby() return listEDC, hSignal # @njit def reverb_time_regression(energyDecay, energyVector, upperLim, lowerLim): """Interpolate the EDT to get the reverberation time.""" if not np.any(energyDecay): return 0 first = np.where(10*np.log10(energyDecay) >= upperLim)[0][-1] last = np.where(10*np.log10(energyDecay) >= lowerLim)[0][-1] if last <= first: # return np.nan return 0 X = np.ones((last-first, 2)) X[:, 1] = energyVector[first:last] c = np.linalg.lstsq(X, 10*np.log10(energyDecay[first:last]), rcond=-1)[0] return -60/c[1] def reverberation_time(decay, listEDC): """Call the reverberation time regression.""" try: decay = int(decay) y1 = -5 y2 = y1 - decay except ValueError: if decay in ['EDT', 'edt']: y1 = 0 y2 = -10 else: raise ValueError("Decay must be either 'EDT' or an integer \ corresponding to the amount of energy decayed to \ evaluate, e.g. (decay='20' | 20).") RT = [] for ED in listEDC: edc, edv = ED RT.append(reverb_time_regression(edc, edv, y1, y2)) return np.array(RT, dtype='float32') def definition(listEDC: list, fs: int, t: int = 50) -> np.ndarray: """ Room parameter. Parameters ---------- sqrIR : np.ndarray DESCRIPTION. t_ms : int, optional DESCRIPTION. The default is 50. Returns ------- definition : np.ndarray The room "Definition" parameter, in percentage [%]. """ t_ms = t * fs // 1000 definition = np.zeros((len(listEDC), ), dtype='float32') for band, pair in enumerate(listEDC): int_h2 = pair[0][0] # sum of squared IR from start to the end intr_h2_ms = pair[0][t_ms] # sum of squared IR from the interval to the end int_h2_ms = int_h2 - intr_h2_ms # sum of squared IR from start to interval definition[band] = (int_h2_ms / int_h2) # sumSIRt = sqrIR.sum(axis=0) # total sum of squared IR # sumSIRi = sqrIR[:t_ms].sum(axis=0) # sum of initial portion of squared IR # definition = np.round(100 * (sumSIRi / sumSIRt), 2) # [%] return np.round(100 * definition, 2) # [%] def clarity(listEDC: list, fs: int, t: int = 80) -> np.ndarray: """ Room parameter. Parameters ---------- sqrIR : np.ndarray DESCRIPTION. t_ms : int, optional DESCRIPTION. The default is 80. Returns ------- clarity : np.ndarray The room "Clarity" parameter, in decibel [dB]. """ t_ms = t * fs // 1000 clarity = np.zeros((len(listEDC), ), dtype='float32') for band, pair in enumerate(listEDC): int_h2 = pair[0][0] # sum of squared IR from start to the end intr_h2_ms = pair[0][t_ms] # sum of squared IR from the interval to the end int_h2_ms = int_h2 - intr_h2_ms # sum of squared IR from start to interval clarity[band] = 10 * np.log10(int_h2_ms / intr_h2_ms) # [dB] # sumSIRi = sqrIR[:t_ms].sum(axis=0) # sum of initial portion of squared IR # sumSIRe = sqrIR[t_ms:].sum(axis=0) # sum of ending portion of squared IR # clarity = np.round(10 * np.log10(sumSIRi / sumSIRe), 2) # [dB] return np.round(clarity, 2) def central_time(sqrIR: np.ndarray, tstamp: np.ndarray) -> np.ndarray: """ Room parameter. Parameters ---------- sqrIR : np.ndarray Squared room impulsive response. tstamp : np.ndarray Time stamps of each IR sample. Returns ------- central_time : np.ndarray The time instant that balance of energy is equal before and after it. """ sumSIR = sqrIR.sum(axis=0) sumTSIR = (tstamp[:, None] * sqrIR).sum(axis=0) central_time = (sumTSIR / sumSIR) * 1000 # milisseconds return central_time def st_early(listEDC: list, fs: int) -> np.ndarray: """ Room parameter. 
Parameters ---------- sqrIR : np.ndarray DESCRIPTION. Returns ------- STearly : np.ndarray DESCRIPTION. """ ms = fs // 1000 STearly = np.zeros((len(listEDC), ), dtype='float32') for band, pair in enumerate(listEDC): int_h2 = pair[0][0] # sum of squared IR from start to the end intr_h2_10ms = pair[0][10 * ms] # sum of squared IR from the interval to the end int_h2_10ms = int_h2 - intr_h2_10ms intr_h2_20ms = pair[0][20 * ms] # sum of squared IR from the interval to the end intr_h2_100ms = pair[0][100 * ms] # sum of squared IR from the interval to the end int_h2_20a100ms = intr_h2_20ms - intr_h2_100ms STearly[band] = 10 * np.log10(int_h2_20a100ms / int_h2_10ms) # [dB] # sum10ms = sqrIR[:int(10 * ms)].sum(axis=0) # sum20ms = sqrIR[int(20 * ms):int(100 * ms)].sum(axis=0) # STearly = 10 * np.log10(sum20ms / sum10ms) return np.round(STearly, 4) def st_late(listEDC: list, fs: int) -> np.ndarray: """ Room parameter. Parameters ---------- sqrIR : np.ndarray DESCRIPTION. Returns ------- STlate : np.ndarray DESCRIPTION. """ ms = fs // 1000 STlate = np.zeros((len(listEDC), ), dtype='float32') for band, pair in enumerate(listEDC): int_h2 = pair[0][0] # sum of squared IR from start to the end intr_h2_10ms = pair[0][10 * ms] # sum of squared IR from the interval to the end int_h2_10ms = int_h2 - intr_h2_10ms # sum of squared IR from start to interval intr_h2_100ms = pair[0][100 * ms] # sum of squared IR from the interval to the end STlate[band] = 10 * np.log10(intr_h2_100ms / int_h2_10ms) # [dB] # sum10ms = sqrIR[:int(10 * ms)].sum(axis=0) # sum100ms = sqrIR[int(100 * ms):int(1000 * ms)].sum(axis=0) # STlate = 10 * np.log10(sum100ms / sum10ms) return
np.round(STlate, 4)
numpy.round
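# Hedged usage sketch for the Analysis class defined above. The import path
# and the band count are assumptions: 1/1-octave bands from 125 to 4000 Hz
# are taken to be the six standard centers 125...4000 Hz:
from pytta.classes.analysis import Analysis  # assumed module path

spl = Analysis(anType='L', nthOct=1, minBand=125, maxBand=4000,
               data=[60, 62, 65, 63, 61, 58], dataLabel='SPL')
louder = spl + 3       # 'L' data combines energetically (see __add__), not linearly
fig = louder.plot_bars()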
#!/usr/bin/python # -*- coding: utf-8 -*- # Scrapes APASS, SDSS, PanSTARRs and Skymapper # Created by <NAME> # Email: <EMAIL> __doc__ = """ Calculate the corrected magnitudes (AB) for sources within an image fits file. This is accomplished by comparing field star observed magnitudes against measured catalogue values and determines the offset. The script currently uses the APASS, SDSS PanSTARRs and Skymapper catalogues. It can calibrate images in the Johnson-Cousins filters (B,V,R) and SDSS filters (u,g,r,i,z). """ __version__ = '1.5' from scipy.optimize import curve_fit from bs4 import BeautifulSoup from astropy.wcs import WCS from astropy.io import fits from matplotlib.patches import Circle import sep import re import argparse import matplotlib.pyplot as plt import numpy as np import os import sys import requests def get_args(): """ Parse command line arguments """ parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('Image File',metavar='FILE',type=str,action='store', help='Name of the image fits file (xxxx.fits)') parser.add_argument('--g',type=float,default=1.1,dest='gain', help='Detector gain in electrons per ADU (default = 1.1)') parser.add_argument('--d',type=str,default=os.getcwd(),dest='directory', help='Desired directory (default = current directory)') parser.add_argument('--sr',type=float,default=5,dest='searchrad', help='Search radius in arcmin (default = 5)') parser.add_argument('--f',type=str,default='V',dest='filter', help='The filter used for the observations. Choices are\ either Johnson B,V,R or SDSS u,g,r,i,z (default = V)') parser.add_argument('--sig',type=float,default=3,dest='sigma', help='Number of sigma to use for clipping outlier stars\ during the magnitude offset calibration (default = 3)') parser.add_argument('--ap',type=float,default=3,dest='aperture', help='Radius of aperture for photometry in pixels (default = 5)') args = parser.parse_args() directory = args.directory gain = args.gain searchrad = args.searchrad waveband = args.filter im_file = args.__dict__['Image File'] sigma = args.sigma aperture = args.aperture # Check for valid filters if (waveband != 'V' and waveband != 'B' and waveband != 'R' and waveband != 'u' and waveband != 'i' and waveband != 'g' and waveband != 'r' and waveband != 'z'): print('Script does not calibrate for this waveband!') sys.exit() return directory,gain,searchrad,waveband,im_file,sigma,aperture def im_phot(directory,gain,im_file,aperture): """ Perform photometry on the image """ # Read in fits image file and create array with wcs coordinates os.chdir(directory) hdulist = fits.open(im_file) w = WCS(im_file) data = hdulist[0].data data[np.isnan(data)] = 0 hdulist.close() # Calculate center point of image (RA, Dec) if not input by user targetra, targetdec = w.all_pix2world(len(data[:,0])/2,len(data[0,:])/2,0) # Use SEP for background subtraction and source detection datasw = data.byteswap().newbyteorder().astype('float64') bkg = sep.Background(datasw) data_bgs = data - bkg data_bgs[data_bgs < 0] = 0 mean = np.mean(data_bgs) median = np.median(data_bgs) std = bkg.globalrms objects = sep.extract(data_bgs,3,err=bkg.globalrms) objra, objdec = w.all_pix2world(objects['x'],objects['y'],0) # Find dummy magnitudes using aperture photometry and plot images fig, ax = plt.subplots() image = plt.imshow(data_bgs,cmap='gray',vmin=(mean-3*std), vmax=(mean+3*std),origin='lower') sepmag = [] sepmagerr = [] ra = [] dec = [] xpixel = [] ypixel = [] for i in range(len(objects)): # Perform circular aperture photometry 
flux,fluxerr,flag = sep.sum_circle(data_bgs,objects['x'][i], objects['y'][i],aperture,err=std,gain=gain) mag = -2.5*np.log10(flux) maglimit1 = -2.5*np.log10((flux+fluxerr)) maglimit2 = -2.5*
np.log10((flux-fluxerr))
numpy.log10
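# The maglimit1/maglimit2 pair above brackets the magnitude uncertainty; a
# hedged sketch of the usual first-order propagation it approaches for small
# errors, sigma_m = (2.5/ln 10) * sigma_f / f:
import numpy as np

flux, fluxerr = 1500.0, 30.0
magerr_lin = 2.5/np.log(10) * fluxerr/flux            # ~0.0217 mag
magerr_avg = 0.5*(2.5*np.log10(flux+fluxerr)
                  - 2.5*np.log10(flux-fluxerr))       # ~0.0217 mag
assert abs(magerr_lin - magerr_avg) < 1e-3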
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import norm
from sklearn_extra.cluster import KMedoids
from sklearn.metrics import silhouette_score

from .spectral import SpectralClustering


def dudahart2(X, clustering, alpha=0.001):
    """
    Duda-Hart test for whether a data set should be split into two clusters.
    Based on the R implementation in the fpc package.

    Parameters
    ----------
    X : Array-like
        Data matrix
    clustering : Array-like or list
        Vector of integers. Clustering into two clusters
    alpha : float, optional
        Numeric between 0 and 1. Significance level (recommended to be
        small if this is used for estimating the number of clusters), by
        default 0.001
    """
    assert isinstance(X, np.ndarray), \
        "X must be a Numpy array of shape (n_samples, n_features)"
    assert len(np.unique(clustering)) == 2, \
        "clustering must have labels for 2 clusters"

    n, p = X.shape
    values, counts = np.unique(clustering, return_counts=True)

    W = np.zeros((p, p))
    for clus, cln in zip(values, counts):
        clx = X[clustering == clus, :]
        cclx = np.cov(clx.T)
        if cln < 2:
            cclx = 0
        W += (cln - 1) * cclx

    W1 = (n-1) * np.cov(X.T)
    dh = np.sum(np.diag(W))/np.sum(np.diag(W1))
    z = norm.ppf(1 - alpha)
    compare = 1 - 2/(np.pi * p) - z*np.sqrt(2 * (1 - 8/(np.pi**2 * p)) / (n*p))
    qz = (-dh + 1 - 2/(np.pi * p)) / \
        np.sqrt(2 * (1 - 8/(np.pi**2 * p)) / (n*p))
    p_value = 1 - norm.cdf(qz)
    cluster1 = dh >= compare
    out = {'p_value': p_value, 'dh': dh, 'compare': compare,
           'cluster1': cluster1, 'alpha': alpha, 'z': z}
    return out


def pamk(X, krange=np.arange(1, 11), method='pam', n_components=10,
         alpha=0.001, random_state=None):
    cluster1 = 1 in krange
    avg_sw = np.zeros(len(krange))
    pams = {1: None}
    for i, k in enumerate(krange):
        if k != 1:
            if method == 'pam':
                clust_method = KMedoids(n_clusters=k, init='k-medoids++',
                                        max_iter=300,
                                        random_state=random_state,
                                        method='pam')
                clust_method.maps_ = X
            elif method == 'spectral_pam':
                clust_method = SpectralClustering(n_clusters=k,
                                                  n_components=n_components,
                                                  random_state=random_state,
                                                  assign_labels='kmedoids')
            else:
                raise ValueError('Method not implemented')
            pams[k] = clust_method.fit(X)
            avg_sw[i] = silhouette_score(
                clust_method.maps_, clust_method.labels_)
    k_best = krange[
np.argmax(avg_sw)
numpy.argmax
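# Hedged usage sketch for dudahart2 above (assumes the function is importable
# from this module; the two-cluster labels come from a trivial known split,
# not a real clustering run):
import numpy as np

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(6, 1, (50, 2))])
labels = np.repeat([0, 1], 50)
out = dudahart2(X, labels, alpha=0.001)
# cluster1 is True when a single cluster suffices; well-separated data
# should give cluster1 == False and a small p_value
print(out['p_value'], out['cluster1'])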
# helper.py # Defines helper functions import numpy as np # 1.13.3 from scipy.integrate import odeint # 1.0.0 import scipy.optimize as op import scipy.stats as stats import matplotlib.pyplot as plt # 2.1.1 from matplotlib.ticker import AutoMinorLocator from matplotlib.ticker import MaxNLocator import pandas as pd # 0.22.0 import corner # 2.0.1 import progressbar # 3.34.3 import seaborn as sns # 0.8.1 from cycler import cycler # 0.10.0 DIR_DATA = './data/' DIR_PLOTS = './plots/' DIR_OUT = './output/' # This helper file defines plotting functions for the jupyter notebooks ######### 1. PLOT FORMATTING ######### def formatplot(ax,xlabel,ylabel,xlim,ylim,logx=False,logy=False,logxy=False,symlogx=False): # Set titles and labels #ax.set_title('Plot title') if xlabel!=False: ax.set_xlabel(xlabel, labelpad=12) if ylabel!=False: ax.set_ylabel(ylabel, labelpad=12) # Set axis limits if xlim!=False: ax.set_xlim(xlim) if ylim!=False: ax.set_ylim(ylim) # Set tick values # ax.set_xticks([0,0.5,1]) # ax.set_yticks([0,2,4,6,8]) # Set line thicknesses #ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%1.e")) #ax.axhline(linewidth=2, color='k') #ax.axvline(linewidth=2, color='k') ax.spines['bottom'].set_linewidth(2) ax.spines['top'].set_linewidth(2) ax.spines['left'].set_linewidth(2) ax.spines['right'].set_linewidth(2) # Set ticks if logx==True: ax.set_xscale("log") elif logy==True: ax.set_yscale("log") elif logxy==True: ax.set_xscale("log") ax.set_yscale("log") elif symlogx==True: ax.set_xscale("symlog",linthreshx=1e-4) ax.set_yscale("log") else: minorLocatorx=AutoMinorLocator(2) # Number of minor intervals per major interval minorLocatory=AutoMinorLocator(2) ax.xaxis.set_minor_locator(minorLocatorx) ax.yaxis.set_minor_locator(minorLocatory) ax.tick_params(which='major', width=2, length=8, pad=9,direction='in',top='on',right='on') ax.tick_params(which='minor', width=2, length=4, pad=9,direction='in',top='on',right='on') ######### 2. PLOT TRACES ######### def plottraces(y1,yerr1,y2,yerr2,y3,yerr3,y4,yerr4, titration,name,model_single,model_dual,DIR_PLOTS, numberofmodeltraces=50,samples=0,quant=None): # Plot dose response traces (Figure 5) plt.close("all") my_dpi=150 figure_options={'figsize':(8,6)} #figure size in inches. A4=11.7x8.3. 
A5=8.27,5.83 font_options={'size':'28','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) current_palette=sns.color_palette("deep", 4) plt.rc('axes',prop_cycle=(cycler('color',current_palette))) f, axarr=plt.subplots() plt.subplots_adjust(left=0.3,bottom=0.2,right=0.95,top=0.9) # Plot data axarr.errorbar(titration,y1,yerr=yerr1,fmt='o',ms=12,label='BCB(+)ADD',color='#C4122C') axarr.errorbar(titration,y2,yerr=yerr2,fmt='^',ms=12,label='BCB(-)ADD',color='#228863') if (y3.any()!=None and y4.any()!=None and yerr3.any()!=None and yerr4.any()!=None): axarr.errorbar(titration/2,y3,yerr=yerr3,fmt='s',ms=12,label='BCB',color='#28A0A3') axarr.errorbar(titration/2,y4,yerr=yerr4,fmt='D',ms=12,label='ADD',color='#0A719F') else: pass modelscale=np.linspace(0,1,100) # Plot model if quant!=None: quant1=quant[0] quant2=quant[1] quant3=quant[2] quant4=quant[3] axarr.fill_between(modelscale,quant1[0],quant1[2],color='#C4122C',alpha=0.1) axarr.fill_between(modelscale,quant2[0],quant2[2],color='#228863',alpha=0.1) axarr.fill_between(modelscale,quant3[0],quant3[2],color='#28A0A3',alpha=0.1) axarr.fill_between(modelscale,quant4[0],quant4[2],color='#0A719F',alpha=0.1) axarr.plot(modelscale,quant1[1],'-',color='#C4122C',alpha=1,lw=1.5) axarr.plot(modelscale,quant2[1],'-',color='#228863',alpha=1,lw=1.5) axarr.plot(modelscale,quant3[1],'-',color='#28A0A3',alpha=1,lw=1.5) axarr.plot(modelscale,quant4[1],'-',color='#0A719F',alpha=1,lw=1.5) axarr.set_yticks([0,2e3,4e3]) axarr.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useMathText=True) formatplot(axarr,'ZF DNA ratio','deGFP (RFU)', xlim=([-0.05,1.05]),ylim=[0,4600]) axarr.legend(loc='best', fontsize=18,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) elif samples.any()!=None: for A,C0,K1,K2,Erp1,Erp2,Er1r2 in samples[np.random.randint(len(samples), size=numberofmodeltraces)]: ypred4=model_single(A,C0,K1,modelscale,Erp1) ypred3=model_single(A,C0,K2,modelscale,Erp2) ypred2=model_dual(A,C0,K1,K2,modelscale/2,modelscale/2,Erp1,Erp2,0) ypred1=model_dual(A,C0,K1,K2,modelscale/2,modelscale/2,Erp1,Erp2,Er1r2) axarr.plot(modelscale,ypred1,'-',color='#C4122C',alpha=0.05) axarr.plot(modelscale,ypred2,'-',color='#228863',alpha=0.05) axarr.plot(modelscale,ypred3,'-',color='#28A0A3',alpha=0.05) axarr.plot(modelscale,ypred4,'-',color='#0A719F',alpha=0.05) formatplot(axarr,'ZF DNA ratio','deGFP (RFU)', xlim=([-0.05,1.05]),ylim=[0,4600]) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) def plottracesAA(y1,yerr1,y2,yerr2,titration,name,model_dual, DIR_PLOTS,numberofmodeltraces=50,samples=0,quant=None): plt.close("all") my_dpi=150 figure_options={'figsize':(8,6)} #figure size in inches. A4=11.7x8.3. 
A5=8.27,5.83 font_options={'size':'28','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) current_palette=sns.color_palette("deep", 4) plt.rc('axes',prop_cycle=(cycler('color',current_palette))) f, axarr=plt.subplots() plt.subplots_adjust(left=0.3,bottom=0.2,right=0.95,top=0.9) # Plot data axarr.errorbar(titration,y1,yerr=yerr1,fmt='o',ms=12,label='AA-LZ(+)',color='#C4122C') axarr.errorbar(titration,y2,yerr=yerr2,fmt='^',ms=12,label='AA-LZ(-)',color='#228863') modelscale=np.linspace(0,1,100) # Plot model if quant!=None: quant1=quant[0] quant2=quant[1] axarr.fill_between(modelscale,quant1[0],quant1[2],color='#C4122C',alpha=0.1) axarr.fill_between(modelscale,quant2[0],quant2[2],color='#228863',alpha=0.1) axarr.plot(modelscale,quant1[1],'-',color='#C4122C',alpha=1,lw=1.5) axarr.plot(modelscale,quant2[1],'-',color='#228863',alpha=1,lw=1.5) axarr.set_yticks([0,2e3,4e3]) axarr.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useMathText=True) formatplot(axarr,'ZF DNA ratio','deGFP (RFU)', xlim=([-0.05,1.05]),ylim=[0,4600]) axarr.legend(loc='best', fontsize=18,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) elif samples.any()!=None: for A,C0,K,Erp,Er1r2 in samples[np.random.randint(len(samples), size=numberofmodeltraces)]: ypred2=model_dual(A,C0,K,K,modelscale/2,modelscale/2,Erp,Erp,0) ypred1=model_dual(A,C0,K,K,modelscale/2,modelscale/2,Erp,Erp,Er1r2) axarr.plot(modelscale,ypred1,'-',color='#C4122C',alpha=0.05) axarr.plot(modelscale,ypred2,'-',color='#228863',alpha=0.05) formatplot(axarr,'ZF DNA ratio','deGFP (RFU)', xlim=([-0.05,1.05]),ylim=[0,4600]) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) def plottraces2(data,parameternames,nwalkers,niterations,ZFname,DIR_PLOTS): numberofplots=data.shape[1] plt.close("all") my_dpi=150 figure_options={'figsize':(8.27,5.83)} #figure size in inches. A4=11.7x8.3. font_options={'size':'18','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) # Call plots if numberofplots>1: f, axarr=plt.subplots(numberofplots) for i in range(0,numberofplots): for j in range(1,nwalkers+1): axarr[i].plot(np.arange(niterations),data[niterations*j-niterations:niterations*j,i],'k-',lw=0.5) formatplot(axarr[i],False,parameternames[i],xlim=False,ylim=False) else: f, axarr=plt.subplots() for i in range(1,nwalkers+1): axarr.plot(np.arange(niterations),data[niterations*i-niterations:niterations*i],'k-',lw=0.5) formatplot(axarr,False,parameternames[0],xlim=False,ylim=False) axarr[numberofplots-1].set_xlabel('Iterations', labelpad=12) plt.savefig(DIR_PLOTS+ZFname+'trace.pdf',dpi=my_dpi,bbox_inches='tight') def plottracesHill(y1,yerr1,y2,yerr2,y3,yerr3,y4,yerr4, titration,name,Hill,DIR_PLOTS, numberofmodeltraces=50,samples=0,quant=None): plt.close("all") my_dpi=150 figure_options={'figsize':(8.27,5.83)} #figure size in inches. A4=11.7x8.3. 
A5=8.27,5.83 font_options={'size':'28','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) current_palette=sns.color_palette("deep", 4) plt.rc('axes',prop_cycle=(cycler('color',current_palette))) f, axarr=plt.subplots() plt.subplots_adjust(left=0.25,bottom=0.2,right=0.95,top=0.95) # Plot data axarr.errorbar(titration,y1,yerr=yerr1,fmt='o',ms=7,label='BCB+ADD coop',color='#C4122C') axarr.errorbar(titration,y2,yerr=yerr2,fmt='^',ms=7,label='BCB+ADD non-coop',color='#228863') if (y3.any()!=None and y4.any()!=None and yerr3.any()!=None and yerr4.any()!=None): axarr.errorbar(titration/2,y3,yerr=yerr3,fmt='s',ms=7,label='BCB',color='#28A0A3') axarr.errorbar(titration/2,y4,yerr=yerr4,fmt='D',ms=7,label='ADD',color='#0A719F') else: pass modelscale=np.linspace(0,1,100) # Plot model if quant!=None: quant1=quant[0] axarr.fill_between(modelscale,quant1[0],quant1[2],color='k',alpha=0.1) axarr.plot(modelscale,quant1[1],'-',color='k',alpha=1,lw=1.5) formatplot(axarr,'ZF DNA ratio','deGFP (RFU)', xlim=([-0.05,1.05]),ylim=[0,4600]) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) elif samples.any()!=None: for y_0,y_1,K,n in samples[np.random.randint(len(samples), size=numberofmodeltraces)]: ypred1=Hill(y_0,y_1,modelscale,K,n) axarr.plot(modelscale,ypred1,'-',color='k',alpha=0.05) formatplot(axarr,'ZF DNA ratio','deGFP (RFU)', xlim=([-0.05,1.05]),ylim=[0,4600]) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) def plottraces3L(df3,x,y0,y1,yerr1,y2,yerr2,name,DIR_PLOTS, helixdist,modelhelix, numberofmodeltraces=50,samples=0,quant=None): plt.close("all") my_dpi=150 figure_options={'figsize':(8,6)} #figure size in inches. A4=11.7x8.3. 
A5=8.27,5.83 font_options={'size':'28','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) current_palette=sns.color_palette("deep", 4) plt.rc('axes',prop_cycle=(cycler('color',current_palette))) f, axarr=plt.subplots() plt.subplots_adjust(left=0.3,bottom=0.2,right=0.95,top=0.9) # Plot data axarr.errorbar(df3['Spacing'],df3['Control'],yerr=df3['Cont_err'],fmt='s',ms=12,color='k',label='Unrepressed') axarr.errorbar(df3['Spacing'],df3['Non-cognate'],yerr=df3['NC_err'],fmt='^',ms=12,color='#0A719F',label='Non-cooperative') axarr.errorbar(df3['Spacing'],df3['Cognate'],yerr=df3['C_err'],fmt='o',ms=12,color='#C4122C',label='Cooperative') # Plot model if quant!=None: quant4=quant[3] quant5=quant[4] axarr.fill_between(x,quant4[0],quant4[2],color='#C4122C',alpha=0.1,label='__nolegend__') axarr.fill_between(x,quant5[0],quant5[2],color='#228863',alpha=0.1,label='__nolegend__') axarr.plot(x,quant4[1],'-',color='#C4122C',alpha=1,lw=1.5,label='__nolegend__') axarr.plot(x,quant5[1],'-',color='#228863',alpha=1,lw=1.5,label='__nolegend__') formatplot(axarr,'spacing (bp)','deGFP (RFU)', xlim=([5,32]),ylim=False) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig('plots/'+name+'.pdf',dpi=my_dpi,transparent=True) elif samples.any()!=None: for lnlamb,phi,lnR0 in samples[np.random.randint(len(samples), size=numberofmodeltraces)]: lamb=np.exp(lnlamb) R0=np.exp(lnR0) # Fixed parameters from dose response experiments A=y0 C0=0.66070446353476231 r1=R0/0.16746036268850761 r2=R0/0.0082708295083317035 E10=1.4349132332094823 E2=1.3110190282670602 E120=-3.4804403863425248 xmod=x E1,E12=helixdist(xmod,E10,E120,lamb,phi) p_c,p_nc,p_0=modelhelix(y0,C0,r1,r2,E1,E2,E12) axarr.plot(x,p_0,'-',color='k',alpha=0.05,label='__nolegend__') axarr.plot(x,p_nc,'-',color='#228863',alpha=0.05,label='__nolegend__') axarr.plot(x,p_c,'-',color='#C4122C',alpha=0.05,label='__nolegend__') formatplot(axarr,'spacing (bp)','deGFP (RFU)', xlim=([5,32]),ylim=False) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) def plottraces3FR(x,y0,y1,yerr1,y2,yerr2,name,DIR_PLOTS, helixdist,modelhelix, numberofmodeltraces=50,samples=0,quant=None): plt.close("all") my_dpi=150 figure_options={'figsize':(8,6)} #figure size in inches. A4=11.7x8.3. 
A5=8.27,5.83 font_options={'size':'28','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) current_palette=sns.color_palette("deep", 4) plt.rc('axes',prop_cycle=(cycler('color',current_palette))) f, axarr=plt.subplots() plt.subplots_adjust(left=0.3,bottom=0.2,right=0.95,top=0.9) # Plot data axarr.errorbar(x,y1,yerr=yerr1,fmt='o',ms=12,color='#C4122C',label='(+)') axarr.errorbar(x,y2,yerr=yerr2,fmt='^',ms=12,color='#228863',label='(-)') # Plot model if quant!=None: quant1=quant[0] quant2=quant[1] axarr.fill_between(x,quant1[0],quant1[2],color='#C4122C',alpha=0.1,label='__nolegend__') axarr.fill_between(x,quant2[0],quant2[2],color='#228863',alpha=0.1,label='__nolegend__') axarr.plot(x,quant1[1],'-',color='#C4122C',alpha=1,lw=1.5,label='__nolegend__') axarr.plot(x,quant2[1],'-',color='#228863',alpha=1,lw=1.5,label='__nolegend__') formatplot(axarr,'spacing (bp)','fold-repression', xlim=([5,32]),ylim=([0,15])) axarr.legend(loc='best', fontsize=18,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) elif samples.any()!=None: for lnlamb,phi,lnR0 in samples[np.random.randint(len(samples), size=numberofmodeltraces)]: lamb=np.exp(lnlamb) R0=np.exp(lnR0) # Fixed parameters from dose response experiments A=y0 C0=0.66070446353476231 r1=R0/0.16746036268850761 r2=R0/0.0082708295083317035 E10=1.4349132332094823 E2=1.3110190282670602 E120=-3.4804403863425248 xmod=x E1,E12=helixdist(xmod,E10,E120,lamb,phi) p_c,p_nc,p_0=modelhelix(y0,C0,r1,r2,E1,E2,E12) axarr.plot(x,p_0/p_c,'-',color='#C4122C',alpha=0.05,label='__nolegend__') axarr.plot(x,p_0/p_nc,'-',color='#228863',alpha=0.05,label='__nolegend__') formatplot(axarr,'spacing (bp)','fold-repression', xlim=([5,32]),ylim=False) axarr.legend(loc='best', fontsize=15,numpoints=1) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) def plottraces3CR(x,y0,y1,yerr1,y2,yerr2,name,DIR_PLOTS, helixdist,modelhelix, numberofmodeltraces=50,samples=0,quant=None): plt.close("all") my_dpi=150 figure_options={'figsize':(8,6)} #figure size in inches. A4=11.7x8.3. 
A5=8.27,5.83 font_options={'size':'28','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) current_palette=sns.color_palette("deep", 4) plt.rc('axes',prop_cycle=(cycler('color',current_palette))) f, axarr=plt.subplots() plt.subplots_adjust(left=0.3,bottom=0.2,right=0.95,top=0.9) # Plot data axarr.errorbar(x,y1/y2,yerr=y1/y2*np.sqrt((yerr1/y1)**2+(yerr2/y2)**2),fmt='o',ms=12,color='k') # Plot model if quant!=None: quant3=quant[2] axarr.fill_between(x,quant3[0],quant3[2],color='k',alpha=0.1,label='__nolegend__') axarr.plot(x,quant3[1],'-',color='k',alpha=1,lw=1.5,label='__nolegend__') formatplot(axarr,'spacing (bp)','cooperativity ratio', xlim=([5,32]),ylim=False) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) elif samples.any()!=None: for lnlamb,phi,lnR0 in samples[np.random.randint(len(samples), size=numberofmodeltraces)]: lamb=np.exp(lnlamb) R0=np.exp(lnR0) # Fixed parameters from dose response experiments A=y0 C0=0.66070446353476231 r1=R0/0.16746036268850761 r2=R0/0.0082708295083317035 E10=1.4349132332094823 E2=1.3110190282670602 E120=-3.4804403863425248 xmod=x E1,E12=helixdist(xmod,E10,E120,lamb,phi) p_c,p_nc,p_0=modelhelix(y0,C0,r1,r2,E1,E2,E12) axarr.plot(x,p_nc/p_c,'-',color='k',alpha=0.05,label='__nolegend__') formatplot(axarr,'spacing (bp)','cooperativity ratio', xlim=([5,32]),ylim=False) plt.savefig(DIR_PLOTS+name+'.pdf',dpi=my_dpi,transparent=True) ######### 3. BOXPLOTS ######### def boxplots(ZFname,parameternames,nwalkers,iterations,tburn,DIR_PLOTS,DIR_OUT): # Read data df1=pd.read_csv(DIR_OUT+'samplesout_'+ZFname+'.csv',delimiter=',') data=np.zeros(df1.shape[0]*(df1.shape[1]-1)).reshape(df1.shape[0],(df1.shape[1]-1)) for i in range(0,int(df1.shape[1]-1)): data[:,i]=np.array(df1.iloc[:,i+1]) # Put dataframe into array. Dataframe has no. columns = no. parameters. # Burn-in time correction data2=
np.zeros((df1.shape[0]-tburn*nwalkers)*(df1.shape[1]-1))
# api: numpy.zeros
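# --- Added example (not part of the original file; a minimal sketch) ---
# The burn-in correction in `boxplots` above drops the first `tburn` draws of
# each of the `nwalkers` stacked MCMC chains, leaving
# (nrows - tburn*nwalkers) x nparams samples. The shapes and names below are
# illustrative assumptions, not the original pipeline.
import numpy as np

nwalkers, niterations, tburn, nparams = 4, 100, 20, 3
data = np.random.randn(nwalkers * niterations, nparams)  # chains stacked vertically

# Reshape to (nwalkers, niterations, nparams), drop burn-in, re-flatten.
chains = data.reshape(nwalkers, niterations, nparams)
data2 = chains[:, tburn:, :].reshape(-1, nparams)
assert data2.shape == ((niterations - tburn) * nwalkers, nparams)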
import pandas as pd import numpy as np from scipy.interpolate import griddata from scipy import ndimage from typing import List, Tuple, Dict, Optional from sklearn.neighbors import NearestNeighbors from .data_helper import low_high_quantile from matplotlib import pyplot as plt from matplotlib import patches, patheffects from mpl_toolkits.axes_grid1 import make_axes_locatable from collections import OrderedDict import statsmodels.api as sm from numpy import ma from matplotlib import cbook from matplotlib.colors import Normalize from matplotlib.colors import LinearSegmentedColormap #colormap from SHAP packakge red_blue = LinearSegmentedColormap('red_blue', { # #1E88E5 -> #ff0052 'red': ((0.0, 30./255, 30./255), (1.0, 255./255, 255./255)), 'green': ((0.0, 136./255, 136./255), (1.0, 13./255, 13./255)), 'blue': ((0.0, 229./255, 229./255), (1.0, 87./255, 87./255)), 'alpha': ((0.0, 1, 1), (0.5, 0.3, 0.3), (1.0, 1, 1)) }) blue_green = LinearSegmentedColormap('blue_green', { # #1E88E5 -> #ff0052 'green': ((0.0, 30./255, 30./255), (1.0, 255./255, 255./255)), 'red': ((0.0, 50./255, 50./255), (1.0, 10./255, 10./255)), 'blue': ((0.0, 229./255, 229./255), (1.0, 87./255, 87./255)), 'alpha': ((0.0, 1, 1), (0.5, 0.3, 0.3), (1.0, 1, 1)) }) blue_green_solid = LinearSegmentedColormap('blue_green_solid', { # #1E88E5 -> #ff0052 'green': ((0.0, 30./255, 30./255), (1.0, 255./255, 255./255)), 'red': ((0.0, 50./255, 50./255), (1.0, 10./255, 10./255)), 'blue': ((0.0, 229./255, 229./255), (1.0, 87./255, 87./255)), 'alpha': ((0.0, 1, 1), (0.5, 1, 1), (1.0, 1, 1)) }) # setting midpoint for colorbar # https://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib class MidPointNorm(Normalize): def __init__(self, midpoint=0, vmin=None, vmax=None, clip=False): Normalize.__init__(self,vmin, vmax, clip) self.midpoint = midpoint def __call__(self, value, clip=None): if clip is None: clip = self.clip result, is_scalar = self.process_value(value) self.autoscale_None(result) vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint if not (vmin < midpoint < vmax): raise ValueError("midpoint must be between maxvalue and minvalue.") elif vmin == vmax: result.fill(0) # Or should it be all masked? Or 0.5? elif vmin > vmax: raise ValueError("maxvalue must be bigger than minvalue") else: vmin = float(vmin) vmax = float(vmax) if clip: mask = ma.getmask(result) result = ma.array(np.clip(result.filled(vmax), vmin, vmax), mask=mask) # ma division is very slow; we can take a shortcut resdat = result.data #First scale to -1 to 1 range, than to from 0 to 1. resdat -= midpoint resdat[resdat>0] /= abs(vmax - midpoint) resdat[resdat<0] /= abs(vmin - midpoint) resdat /= 2. 
resdat += 0.5 result = ma.array(resdat, mask=result.mask, copy=False) if is_scalar: result = result[0] return result def inverse(self, value): if not self.scaled(): raise ValueError("Not invertible until scaled") vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint if cbook.iterable(value): val = ma.asarray(value) val = 2 * (val-0.5) val[val>0] *= abs(vmax - midpoint) val[val<0] *= abs(vmin - midpoint) val += midpoint return val else: val = 2 * (val - 0.5) if val < 0: return val*abs(vmin-midpoint) + midpoint else: return val*abs(vmax-midpoint) + midpoint def plot_shap_dependence(shapVals_df, df, feature='ProppantIntensity_LBSPerFT', feature_disp=None, cmap=plt.cm.coolwarm, s=10, title=None, color_bar=True, color_title=None): feature_disp = feature if feature_disp is None else feature_disp title = feature_disp if title is None else title color_title = 'Feature Impact' if color_title is None else color_title x = df[feature].values y = shapVals_df[feature].values cvals =y clow = np.nanpercentile(cvals, 5) chigh = np.nanpercentile(cvals, 95) norm = MidPointNorm(midpoint=0) if color_bar else MidPointNorm(midpoint=0, vmin=clow, vmax=chigh) # setting vmin/vmax will clip cbar # scalarm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) # scalarm._A = [] cvals_nans = np.isnan(cvals) cval_notNan = np.invert(cvals_nans) fig, ax = plt.subplots(figsize=(8,5)) ax.scatter(x[cvals_nans], y[cvals_nans], s=s, color="#777777", alpha=1, rasterized=len(x) > 500) mapable = ax.scatter(x[cval_notNan], y[cval_notNan], s=s, c=cvals[cval_notNan], cmap=cmap, alpha=1, norm=norm, rasterized=len(x) > 500) if color_bar: cb = colorbar(mapable, size=0.15) cb.set_clim(clow, chigh) # setting vmin/vmaqx here will set even color beyond these numbers # cb = colorbar(scalarm, size=0.15) cb.set_label(color_title, size=13) cb.outline.set_visible(False) cb.set_alpha(1) ax.set_xlabel(feature_disp, fontsize=14) ax.set_ylabel('Feature Impact', fontsize=14) ax.set_title(title, fontsize=14) return ax def nan_to_mean(arr:np.ndarray, axis:int=0)->np.ndarray: '''fills nan with mean over axis . 
uses masked array to apply mean to complete nan columns np.nanmean() can not do that other option would be to set some kind of spline extrapolation ''' data_m = np.ma.masked_invalid(arr, copy=True) return np.where(np.isnan(arr), data_m.mean(axis=axis), arr) Type_mapout = Tuple[np.ndarray, np.ndarray, Dict[str, np.ndarray], Dict[str, np.ndarray]] def map_grid(df:pd.DataFrame, nxny:Tuple[int]=(500,500), lat_lon_names:List[str]=['Latitude_Mid','Longitude_Mid'])->Type_mapout: '''generates linear interpolated maps return: xi, yi, {col:interpolated}''' zis = {} cols = df.drop(columns=lat_lon_names).columns lat, lon = lat_lon_names y, x = df[lat], df[lon] nx, ny = nxny minx, maxx = x.min(), x.max() miny, maxy = y.min(), y.max() xi = np.linspace(minx, maxx, nx) yi = np.linspace(miny, maxy, ny) for col in cols: zi = griddata((x, y), df[col], (xi[None,:], yi[:,None]), method='linear') zis[col] = zi return xi, yi, zis def blured_map(zis, sigma:float=5.)->Type_mapout: '''generates linear interpolated and blured maps return: xi, yi, {col:interpolated}, {col:blured}''' zibs = {} for col, zi in zis.items(): zi_blurred = nan_to_mean(zi, axis=0) #need so blure not cut nan edges zi_blurred = ndimage.gaussian_filter(zi_blurred, sigma=sigma) zi_blurred[np.isnan(zi)] = np.nan zibs[col] = zi_blurred return zibs def plot_contour_map(xi:np.ndarray, yi:np.ndarray, zi:np.ndarray, mask:Optional=None, n_conturs:int=15, ax:Optional=None, fig:Optional=None, figsize=(10,10), vminmax:Optional=None, addColorbar=True, colorbarLabel=None, args={}, argsf={}): if ax is None: fig, ax = plt.subplots(figsize=figsize) if mask is not None: zi = np.ma.masked_where(~mask, zi) vmin, vmax = low_high_quantile(pd.Series(zi.flatten()),1/100) if vminmax is None else vminmax cs = ax.contourf(xi ,yi, zi, n_conturs, vmin=vmin, vmax=vmax, antialiased=True, **argsf) ax.contour(xi, yi, zi, n_conturs, linewidths=0.5, colors='k', antialiased=True, **args) #add vm ax.set_aspect(1) cbar =colorbar(cs, label=colorbarLabel) if addColorbar else None return fig, ax, cbar def mask_by_dist(df, col, xi, yi, radius=0.3, lon_lat_names:List[str]=['Longitude_Mid', 'Latitude_Mid']): nx, ny = len(xi), len(yi) xm, ym = np.meshgrid(xi, yi) Xtrn = df[lon_lat_names] Xtest = pd.DataFrame({'x':xm.flatten(), 'y':ym.flatten()}) nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(Xtrn, df[col]) rad, index = nbrs.radius_neighbors(Xtest, radius=radius, return_distance=True) mask = np.array([(True if len(x)>0 else False) for x in rad]).reshape((ny,nx)) return mask def fence_draw(gf, ax, latlon=['lat', 'lon'], **args): ''' takes fennce coord E.G. 
geo_fence={'lon':(-98, -97.73), 'lat': (28.83, 29.19)} adds patch to axes ''' lat, lon = latlon dlon = gf[lon][1]-gf[lon][0] dlat = gf[lat][1]-gf[lat][0] rect = patches.Rectangle((gf[lon][0],gf[lat][0]),dlon,dlat,linewidth=1,edgecolor='r',facecolor='none', **args) ax.add_patch(rect) def colorbar(mappable, ax=None, location='right', size="5%", pad=0.05, **args): if ax is None: try: ax = mappable.axes except: ax = mappable.ax # for contour plots fig = ax.figure divider = make_axes_locatable(ax) cax = divider.append_axes(location, size=size, pad=pad) return fig.colorbar(mappable, cax=cax, **args) def draw_outline(o, lw): '''from fastai''' o.set_path_effects([patheffects.Stroke( linewidth=lw, foreground='black'), patheffects.Normal()]) def draw_text(ax, xy, txt, sz=14, outsz=2): '''from fastai''' #ax.annotate(txt, (df[lon].iloc[i], df[lat].iloc[i])) text = ax.text(*xy, txt, verticalalignment='top', color='white', fontsize=sz)#, weight='bold') draw_outline(text, outsz) def draw_rect(ax, b): '''from fastai''' patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor='white', lw=2)) draw_outline(patch, 4) def plot_pdp_std(wells_ice, smooth=True, zero_start=False, frac=0.15, ax=None, xlabel=None, ylabel='annual boe/1000ft', title='Completion Impact', quantile=True, addStd=True, addLegend=True, argF={'alpha':0.2}, argPDP={}, figsize=(12,7)): '''plot median line with 25, 75% quintiles [default] or mean with +-std''' if ax is None: fig, ax = plt.subplots(figsize=figsize) if smooth: lowess = sm.nonparametric.lowess for api, ice in wells_ice.items(): if zero_start: ice = ice.sub(ice.iloc[:,0], axis=0) describe = ice.describe() # gives mean std and quintile values ice_pdp = describe.loc['50%'] if quantile else describe.loc['mean'] ice_upper = describe.loc['75%'] if quantile else describe.loc['mean'] + describe.loc['std'] ice_lower = describe.loc['25%'] if quantile else describe.loc['mean'] - describe.loc['std'] upper = ice_upper.values lower = ice_lower.values pdp = ice_pdp.values if smooth: pdp = lowess(ice_pdp.values,
np.array(ice.columns)
# api: numpy.array
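# --- Added example (not part of the original file; a minimal sketch) ---
# How `nan_to_mean` above behaves: each NaN is replaced by the mean of its
# column (axis=0), computed with a masked array so NaNs are ignored.
import numpy as np

arr = np.array([[1.0, np.nan],
                [3.0, 4.0]])
filled = nan_to_mean(arr, axis=0)
# Column means ignoring NaNs are [2.0, 4.0], so:
# filled -> [[1., 4.],
#            [3., 4.]]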
""" Segment song motifs by finding maxima in spectrogram cross correlations. """ __date__ = "April 2019 - November 2020" from affinewarp import ShiftWarping import h5py from itertools import repeat from joblib import Parallel, delayed import matplotlib.pyplot as plt plt.switch_backend('agg') try: # Numba >= 0.52 from numba.core.errors import NumbaPerformanceWarning except ModuleNotFoundError: try: # Numba <= 0.45 from numba.errors import NumbaPerformanceWarning except (NameError, ModuleNotFoundError): pass import numpy as np from scipy.io import wavfile from scipy.io.wavfile import WavFileWarning from scipy.signal import stft from scipy.ndimage.filters import gaussian_filter import os import umap import warnings from ava.plotting.tooltip_plot import tooltip_plot EPSILON = 1e-9 def get_template(feature_dir, p, smoothing_kernel=(0.5, 0.5), verbose=True): """ Create a linear feature template given exemplar spectrograms. Parameters ---------- feature_dir : str Directory containing multiple audio files to average together. p : dict Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``, ``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``. smoothing_kernel : tuple of floats, optional Each spectrogram is blurred using a gaussian kernel with the following bandwidths, in bins. Defaults to ``(0.5, 0.5)``. verbose : bool, optional Defaults to ``True``. Returns ------- template : np.ndarray Spectrogram template. """ filenames = [os.path.join(feature_dir, i) for i in os.listdir(feature_dir) \ if _is_wav_file(i)] specs = [] for i, filename in enumerate(filenames): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=WavFileWarning) fs, audio = wavfile.read(filename) assert fs == p['fs'], "Found samplerate="+str(fs)+\ ", expected "+str(p['fs']) spec, dt = _get_spec(fs, audio, p) spec = gaussian_filter(spec, smoothing_kernel) specs.append(spec) min_time_bins = min(spec.shape[1] for spec in specs) specs = np.array([i[:,:min_time_bins] for i in specs]) # Average over all the templates. template = np.mean(specs, axis=0) # Normalize to unit norm. template -= np.mean(template) template /= np.sum(np.power(template, 2)) + EPSILON if verbose: duration = min_time_bins * dt print("Made template from", len(filenames), "files. Duration:", duration) return template def segment_files(audio_dirs, segment_dirs, template, p, num_mad=2.0, \ min_dt=0.05, n_jobs=1, verbose=True): """ Write segments to text files. Parameters ---------- audio_dirs : list of str Audio directories. segment_dirs : list of str Corresponding directories containing segmenting decisions. template : numpy.ndarray Spectrogram template. p : dict Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``, ``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``. num_mad : float, optional Number of median absolute deviations for cross-correlation threshold. Defaults to ``2.0``. min_dt : float, optional Minimum duration between cross correlation maxima. Defaults to ``0.05``. n_jobs : int, optional Number of jobs for parallelization. Defaults to ``1``. verbose : bool, optional Defaults to ``True``. Returns ------- result : dict Maps audio filenames to segments (numpy.ndarrays). """ # Collect all the filenames we need to parallelize. 
all_audio_fns = [] all_seg_dirs = [] for audio_dir, segment_dir in zip(audio_dirs, segment_dirs): if not os.path.exists(segment_dir): os.makedirs(segment_dir) audio_fns = [os.path.join(audio_dir, i) for i in os.listdir(audio_dir) \ if _is_wav_file(i)] all_audio_fns = all_audio_fns + audio_fns all_seg_dirs = all_seg_dirs + [segment_dir]*len(audio_fns) # Segment. if verbose: print("Segmenting files. n =",len(all_audio_fns)) gen = zip(all_seg_dirs, all_audio_fns, repeat(template), repeat(p), \ repeat(num_mad), repeat(min_dt)) res = Parallel(n_jobs=n_jobs)(delayed(_segment_file)(*args) for args in gen) # Write results. result = {} num_segments = 0 for segment_dir, audio_fn, segments in res: result[audio_fn] = segments segment_fn = os.path.split(audio_fn)[-1][:-4] + '.txt' segment_fn = os.path.join(segment_dir, segment_fn) np.savetxt(segment_fn, segments, fmt='%.5f') num_segments += len(segments) if verbose: print("\tFound", num_segments, "segments.") print("\tDone.") # Return a dictionary mapping audio filenames to segments. return result def read_segment_decisions(audio_dirs, segment_dirs, verbose=True): """ Returns the same data as ``segment_files``. Parameters ---------- audio_dirs : list of str Audio directories. segment_dirs : list of str Segment directories. verbose : bool, optional Defaults to ``True``. Returns ------- result : dict Maps audio filenames to segments. """ if verbose: print("Reading segments...") result = {} n_segs = 0 for audio_dir, segment_dir in zip(audio_dirs, segment_dirs): audio_fns = [os.path.join(audio_dir, i) for i in os.listdir(audio_dir) \ if _is_wav_file(i)] for audio_fn in audio_fns: segment_fn = os.path.split(audio_fn)[-1][:-4] + '.txt' segment_fn = os.path.join(segment_dir, segment_fn) segments = np.loadtxt(segment_fn).reshape(-1,2) result[audio_fn] = segments n_segs += len(segments) if verbose: print("\tFound", n_segs, "segments.") print("\tDone.") return result def _segment_file(segment_dir, filename, template, p, num_mad=2.0, min_dt=0.05,\ min_extra_time_bins=5): """ Match linear spetrogram features and extract times where features align. Parameters ---------- segment_dir : str Segment directory. filename : str Audio filename. template : numpy.ndarray Spectrogram template. p : dict Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``, ``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``. num_mad : float, optional Number of median absolute deviations for cross-correlation threshold. Defaults to ``2.0``. min_dt : float, optional ... min_extra_time_bins : int, optional ... Returns ------- segment_dir : str Copied from input parameters. filename : str Copied from input parameters. segments : numpy.ndarray Onsets and offsets. 
""" with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=WavFileWarning) fs, audio = wavfile.read(filename) assert fs == p['fs'], "Found samplerate="+str(fs)+", expected "+str(p['fs']) if len(audio) < p['nperseg']: warnings.warn( "Found an audio file that is too short to make a spectrogram: "+\ filename + "\nSamples: "+str(len(audio))+"\np[\'nperseg\']: "+\ str(p['nperseg']), UserWarning ) return segment_dir, filename, np.zeros((0, 2)) big_spec, dt = _get_spec(fs, audio, p) spec_len = template.shape[1] template = template.flatten() if big_spec.shape[1] - spec_len < min_extra_time_bins: d1, d2 = dt*spec_len, dt*big_spec.shape[1] warnings.warn( "Found an audio file that is too short to extract segments from: "+\ filename + "\nTemplate duration: "+str(d1)+"\nFile duration: "+\ str(d2)+"\nConsider reducing the template duration.", UserWarning ) return segment_dir, filename, np.zeros((0, 2)) # Compute normalized cross-correlation. result = np.zeros(big_spec.shape[1] - spec_len) for i in range(len(result)): temp = big_spec[:,i:i+spec_len].flatten() temp -= np.mean(temp) temp /= np.sum(np.power(temp, 2)) + EPSILON result[i] = np.dot(template, temp) median = np.median(result) abs_devs = np.abs(result - median) mad = np.median(abs_devs) + EPSILON # Get maxima. times = dt * np.arange(len(result)) indices = np.argwhere(result>median + num_mad*mad).flatten()[1:-1] max_indices = [] for i in range(2,len(indices)-1): if max(result[indices[i]-1], result[indices[i]+1]) < result[indices[i]]: max_indices.append(indices[i]) max_indices = np.array(max_indices, dtype='int') max_indices = _clean_max_indices(max_indices, times, result, min_dt=min_dt) # Define onsets/offsets. segments = np.zeros((len(max_indices), 2)) segments[:,0] = dt * max_indices # onsets segments[:,1] = segments[:,0] + spec_len * dt return segment_dir, filename, segments def clean_collected_data(result, audio_dirs, segment_dirs, p, \ max_num_specs=10000, verbose=True, img_fn='temp.pdf', \ tooltip_plot_dir='html'): """Deprecated. See ``clean_collected_segments``.""" warnings.warn( "ava.segmenting.template_segmentation.clean_collected_data has been" + \ " renamed to clean_collected_segments in v0.3.0.", UserWarning ) clean_collected_segments(result, audio_dirs, segment_dirs, p, \ max_num_specs=max_num_specs, verbose=verbose, img_fn=img_fn, \ tooltip_plot_dir=tooltip_plot_dir) def clean_collected_segments(result, audio_dirs, segment_dirs, p, \ max_num_specs=10000, verbose=True, img_fn='temp.pdf', \ tooltip_plot_dir='html'): """ Take a look at the collected segments and discard false positives. Parameters ---------- result : dict Output of ``segment_files`` or `read_segment_decisions``. audio_dirs : list of str Directories containing audio. segment_dirs : list of str Directories containing segmenting decisions. p : dict Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``, ``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``. max_num_specs : int, optional Maximum number of spectrograms to feed to UMAP. Deafults to ``10000``. verbose : bool, optional Defaults to ``True``. img_fn : str, optional Image filename. Defaults to ``'temp.pdf'``. tooltip_plot_dir : str, optional Directory to save tooltip plot to. Defaults to ``'html'``. """ # Collect spectrograms. 
if verbose: print("Collecting spectrograms...") specs = [] for filename in result.keys(): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=WavFileWarning) fs, audio = wavfile.read(filename) assert fs == p['fs'], "Found samplerate=" + str(fs) + \ ", expected " + str(p['fs']) for segment in result[filename]: i1 = int(round(segment[0] * fs)) i2 = int(round(segment[1] * fs)) spec, dt = _get_spec(fs, audio[i1:i2], p) specs.append(spec) if len(specs) == 0: warnings.warn( "Found no spectrograms in " + \ "ava.segmenting.template_segmentation.clean_collected_segments.\n" \ + "Consider reducing the `num_mad` parameter in `segment_files`.", UserWarning ) return max_t = max(spec.shape[1] for spec in specs) temp_specs = np.zeros((len(specs), specs[0].shape[0], max_t)) for i, spec in enumerate(specs): temp_specs[i,:,:spec.shape[1]] = spec specs = temp_specs if len(specs) > max_num_specs: warnings.warn( "Found more spectrograms than `max_num_specs` (" + \ str(max_num_specs) + "). Consider increasing `max_num_specs` or" + \ " `num_mad`.", UserWarning ) if verbose: print("\tCollected",len(specs),"spectrograms.") print("\tSpectrogram shape:", specs.shape[1:]) if len(specs) > max_num_specs: print("\tRandomly sampling", max_num_specs, "spectrograms.") print("\tDone.") np.random.seed(42) specs = specs[np.random.permutation(len(specs))[:max_num_specs]] np.random.seed(None) # UMAP the spectrograms. if verbose: print("Running UMAP. n =", len(specs)) transform = umap.UMAP(random_state=42, metric='correlation') # https://github.com/lmcinnes/umap/issues/252 with warnings.catch_warnings(): try: warnings.filterwarnings("ignore", \ category=NumbaPerformanceWarning) except NameError: pass embedding = transform.fit_transform(specs.reshape(len(specs), -1)) if verbose: print("\tDone.") # Plot and ask for user input. bounds = { 'x1s':[], 'x2s':[], 'y1s':[], 'y2s':[], } bounds_keys = ['x1s', 'x2s', 'y1s', 'y2s'] queries = ['x1: ', 'x2: ', 'y1: ', 'y2: '] X, Y = embedding[:,0], embedding[:,1] i = 0 while True: colors = ['b' if _in_region(embed, bounds) else 'r' for \ embed in embedding] print("Selected", \ len([c for c in colors if c=='b']), "out of", len(colors)) plt.scatter(X, Y, c=colors, s=0.9, alpha=0.5) for x_tick in np.arange(np.floor(np.min(X)), np.ceil(np.max(X))): plt.axvline(x=x_tick, c='k', alpha=0.1, lw=0.5) for y_tick in np.arange(np.floor(np.min(Y)), np.ceil(np.max(Y))): plt.axhline(y=y_tick, c='k', alpha=0.1, lw=0.5) title = "Find relevant song" plt.title(title) plt.savefig(img_fn) plt.close('all') # Plot the tooltip plot. if i == 0: if verbose: print("Writing tooltip plot...") tooltip_plot(embedding, specs, output_dir=tooltip_plot_dir, \ num_imgs=1000, title=title, grid=True) if verbose: print("\tDone.") # Get input from user. for key, query in zip(bounds_keys, queries): answer = 'initial input' while not _is_number(answer): answer = input(query) bounds[key].append(float(answer)) # Continue? temp = input('[Enter] to select more regions, [c] to continue: ') if temp == 'c': break i += 1 # Save only the good segments. 
if verbose: print("Saving segments...") num_deleted, num_total = 0, 0 for audio_dir, seg_dir in zip(audio_dirs, segment_dirs): audio_fns = [os.path.join(audio_dir, i) for i in os.listdir(audio_dir) \ if _is_wav_file(i)] for audio_fn in audio_fns: with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=WavFileWarning) fs, audio = wavfile.read(audio_fn) assert fs == p['fs'], "Found samplerate=" + str(fs) + \ ", expected " + str(p['fs']) segment_fn = os.path.split(audio_fn)[-1][:-4] + '.txt' segment_fn = os.path.join(seg_dir, segment_fn) segments = np.loadtxt(segment_fn).reshape(-1,2) if len(segments) == 0: continue new_segments = np.zeros(segments.shape) i = 0 specs = [] for segment in segments: i1 = int(round(segment[0] * fs)) i2 = int(round(segment[1] * fs)) spec, dt = _get_spec(fs, audio[i1:i2], p) temp_spec = np.zeros((spec.shape[0], max_t)) temp_spec[:, :spec.shape[1]] = spec spec = temp_spec specs.append(spec) specs = np.stack(specs) embed = transform.transform(specs.reshape(specs.shape[0],-1)) for j, segment in enumerate(segments): if _in_region(embed[j], bounds): new_segments[i] = segment[:] i += 1 num_total += 1 else: num_deleted += 1 new_segments = new_segments[:i] np.savetxt(segment_fn, new_segments, fmt='%.5f') if verbose: print("\tdeleted:", num_deleted, "remaining:", num_total) print("\tDone.") def segment_sylls_from_songs(audio_dirs, song_seg_dirs, syll_seg_dirs, p, \ shoulder=0.05, img_fn='temp.pdf', verbose=True): """ Split song renditions into syllables, write segments. Enter quantiles to determine where to split the song motif. Entering the same quantile twice will remove it. Note ---- * All the song segments must be the same duration! Parameters ---------- audio_dirs : list of str Audio directories. song_seg_dirs : list of str Directories containing song segments. syll_seg_dirs : list of str Directories where syllable segments are written. p : dict Segmenting parameters. shoulder : float, optional Duration of padding on either side of song segments, in seconds. img_fn : str, optional Image filename. Defaults to ``'temp.pdf'``. verbose : bool, optional Defaults to `True`. """ # Read segments. song_segs = read_segment_decisions(audio_dirs, song_seg_dirs) # Collect spectrograms. empty_audio_files = [] specs, fns, song_onsets = [], [], [] for audio_fn in song_segs: with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=WavFileWarning) fs, audio = wavfile.read(audio_fn) for seg in song_segs[audio_fn].reshape(-1,2): # Make spectrogram. onset, offset = seg[0] - shoulder, seg[1] + shoulder i1, i2 = int(fs*onset), int(fs*offset) spec, dt = _get_spec(fs, audio[max(i1,0):i2], p) # Pad spectrogram if it's near the edge of the file. if i1 < 0 or i2 > len(audio): pre_bins = max(0, int(np.round(-i1/fs/dt))) post_bins = max(0, int(np.round((i2 - len(audio))/fs/dt))) new_spec = np.mean(spec) * \ np.ones((spec.shape[0], spec.shape[1]+pre_bins+post_bins)) if post_bins == 0: post_bins = -new_spec.shape[1] new_spec[pre_bins:-post_bins] spec = new_spec specs.append(spec) fns.append(audio_fn) song_onsets.append(onset) if len(song_segs[audio_fn]) == 0: empty_audio_files.append(audio_fn) assert len(specs) > 0, "Found no spectrograms!" # Calculate and smooth amplitude traces. amp_traces = [] for spec in specs: amps = np.sum(spec, axis=0) amps -= np.mean(amps) amps /= np.std(amps) + EPSILON amp_traces.append(amps) # Truncate the amplitude traces if they aren't exactly the same length. 
min_time_bins = min(len(amp_trace) for amp_trace in amp_traces) max_time_bins = max(len(amp_trace) for amp_trace in amp_traces) if verbose and (min_time_bins != max_time_bins): print("Found different numbers of time bins in segments!") print("\tmin:" + str(min_time_bins) + ", max:", max_time_bins) print("\tTruncating to minimum number of time bins.") if min_time_bins != max_time_bins: amp_traces = [amp_trace[:min_time_bins] for amp_trace in amp_traces] amp_traces = np.array(amp_traces) # Warp the amplitude traces. max_t = amp_traces.shape[1]*dt*1e3 num_time_bins = amp_traces.shape[1] model = ShiftWarping(maxlag=0.2, smoothness_reg_scale=10.0) model.fit(amp_traces[:,:,np.newaxis], iterations=50) aligned = model.predict().squeeze() max_raw_val = np.max(amp_traces) max_aligned_val = np.max(aligned) shifts = model.shifts quantiles = [] break_flag = False while True: # Plot. _, axarr = plt.subplots(3,1, sharex=True) axarr[0].imshow(specs[np.random.randint(len(specs))], origin='lower', \ aspect='auto', extent=[0,max_t,p['min_freq']/1e3, \ p['max_freq']/1e3]) temp = np.copy(amp_traces) for q in quantiles: for i in range(len(temp)): try: temp[i,int(round(q*num_time_bins))+shifts[i]] = max_raw_val except IndexError: pass axarr[1].imshow(temp, origin='lower', aspect='auto', \ extent=[0,max_t,0,len(amp_traces)]) temp = np.copy(aligned) for q in quantiles: for i in range(len(temp)): temp[i,int(round(q*num_time_bins))] = max_aligned_val axarr[2].imshow(temp, origin='lower', aspect='auto', \ extent=[0,max_t,0,len(amp_traces)]) axarr[0].set_ylabel("Frequency (kHz)") axarr[1].set_ylabel('Amplitude') axarr[2].set_ylabel('Shifted') axarr[0].set_title('Enter segmenting quantiles:') axarr[2].set_xlabel('Time (ms)') plt.savefig(img_fn) plt.close('all') # Ask for segmenting decisions. while True: temp = input("Add or delete quantile or [s]top: ") if temp == 's': break_flag = True break try: temp = float(temp) assert 0.0 < temp and temp < 1.0 if temp in quantiles: quantiles.remove(temp) else: quantiles.append(temp) break except: print("Invalid input!") print("Must be \'s\' or a float between 0 and 1.") continue if break_flag: break # Write syllable segments. if verbose: print("Writing syllable segments...") duration = num_time_bins * dt quantiles =
np.array(quantiles)
# api: numpy.array
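# --- Added example (not part of the original module; a minimal sketch) ---
# The core of _segment_file above is a sliding normalized cross-correlation:
# each spectrogram window is zero-meaned, normalized by its power, and dotted
# with the flattened template. The toy shapes below are illustrative
# assumptions, not values from the original pipeline.
import numpy as np

EPSILON = 1e-9
rng = np.random.default_rng(0)
big_spec = rng.random((64, 200))   # (freq bins, time bins)
template = rng.random((64, 30))    # exemplar template spectrogram
spec_len = template.shape[1]
template = template.flatten()
template -= np.mean(template)
template /= np.sum(np.power(template, 2)) + EPSILON

result = np.zeros(big_spec.shape[1] - spec_len)
for i in range(len(result)):
    window = big_spec[:, i:i + spec_len].flatten()
    window = window - np.mean(window)
    window /= np.sum(np.power(window, 2)) + EPSILON
    result[i] = np.dot(template, window)

# Candidate motif onsets are local maxima of `result` exceeding
# median + num_mad * MAD, as in _segment_file.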
import matplotlib.pyplot as plots
import numpy as np
from scipy.stats import norm


def plot_function(D, input_node, mu, sigma, batch_size, sess):
    figure, axis = plots.subplots(1)
    xaxis = np.linspace(-6, 6, 1000)
    axis.plot(xaxis, norm.pdf(xaxis, loc=mu, scale=sigma), label='p_distribution')
    r = 1000
    xaxis = np.linspace(-6, 6, r)
    decisor_beginning = np.zeros((r, 1))
    for i in range(int(r / batch_size)):
        x = np.reshape(xaxis[batch_size * i:batch_size * (i + 1)], (batch_size, 1))
        decisor_beginning[batch_size * i:batch_size * (i + 1)] = sess.run(D, {input_node: x})
    axis.plot(xaxis, decisor_beginning, label='decision boundary', color='black')
    axis.set_ylim(0, 1.1)
    plots.legend()


def plot_function2(dis, gen, sigma, mu, batch_size, sess, x_input_node, z_input_node):
    figure, axis = plots.subplots(1)
    xaxis = np.linspace(-6, 6, 1000)
    axis.plot(xaxis, norm.pdf(xaxis, loc=mu, scale=sigma), label='p_original_distribution')
    r = 1000
    xaxis = np.linspace(-6, 6, r)
    decisor = np.zeros((r, 1))
    for i in range(int(r / batch_size)):
        x = np.reshape(xaxis[batch_size * i:batch_size * (i + 1)], (batch_size, 1))
        decisor[batch_size * i:batch_size * (i + 1)] = sess.run(dis, {x_input_node: x})
    axis.plot(xaxis, decisor, label='decision boundary', color='black')
    zs = np.linspace(-6, 6, r)
    generated = np.zeros((r, 1))
    for i in range(int(r / batch_size)):
        z = np.reshape(zs[batch_size * i:batch_size * (i + 1)], (batch_size, 1))
# api: numpy.reshape
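# --- Added example (not part of the original file; a minimal sketch) ---
# Both plot functions above scan a 1-D grid through the network batch_size
# points at a time. The same pattern with a plain callable standing in for
# sess.run (`fake_model` is an illustrative stand-in, not the original model):
import numpy as np

def fake_model(x):
    return 1.0 / (1.0 + np.exp(-x))  # stand-in for the discriminator

r, batch_size = 1000, 100
xaxis = np.linspace(-6, 6, r)
out = np.zeros((r, 1))
for i in range(int(r / batch_size)):
    x = np.reshape(xaxis[batch_size * i:batch_size * (i + 1)], (batch_size, 1))
    out[batch_size * i:batch_size * (i + 1)] = fake_model(x)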
from __future__ import absolute_import, division, print_function

import numpy as np


def compute_rhog(psi1r, psi2r, ft, rho1g=None, rho2g=None):
    """Compute rho(G, -G) for two electrons occupying two (KS) orbitals.

    rho(G, -G) is defined as f1(G) * f2(-G) - |f3(G)|^2, which is equal to
    f1(G) * conj(f2(G)) - f3(G) * conj(f3(G)).

    f1, f2 and f3 are defined following PRB 77, 035119 (2008):
        f1(r) = |psi1(r)|^2
        f2(r) = |psi2(r)|^2
        f3(r) = conj(psi1(r)) * psi2(r)
    f1(G), f2(G) and f3(G) are obtained by Fourier transform of f1(r), f2(r)
    and f3(r). rho(r) can be computed for debugging as the inverse FT of
    rho(G, -G); that step is left commented out below.

    Args:
        psi1r, psi2r (np.ndarray): R-space wavefunctions for electrons 1 and 2.
        ft (..common.ft.FourierTransform): FT which defines the grid size.
        rho1g, rho2g (np.ndarray): G-space charge densities for electrons 1 and 2.
            If not provided, they are computed from psi1r and psi2r.

    Returns:
        rho(G, -G) as a np.ndarray of shape (ft.N, ft.N).
    """
    if rho1g is not None:
        assert rho1g.shape == psi1r.shape
        f1g = rho1g
    else:
        f1r = psi1r * np.conj(psi1r)
        f1g = ft.forward(f1r)

    if rho2g is not None:
        assert rho2g.shape == psi2r.shape
        f2g = rho2g
    else:
        f2r = psi2r * np.conj(psi2r)
        f2g = ft.forward(f2r)

    f3r = psi1r * np.conj(psi2r)
    f3g = ft.forward(f3r)

    # rhoj = f1g * np.conj(f2g)
    # rhok = f3g * np.conj(f3g)
    rhog = f1g * np.conj(f2g) - f3g * np.conj(f3g)
    # rhor = ft.backward(rhog)  # debug: rho back in R space

    return rhog  # , rhor, rhoj, rhok


def compute_delta_model_rhog(cell, ft, d1, d2, d3, s=1):
    """Compute rho(G, -G) for two point dipoles.

    The two spin dipoles are approximated as a homogeneous dipole gas confined
    to small boxes of side s.

    Args:
        cell (..common.cell.Cell): Cell on which to compute ddig.
        ft (..common.ft.FourierTransform): FT which defines the grid size.
        d1, d2, d3 (float): distance between the two dipoles in 3 dimensions.
        s (float): box size.

    Returns:
        rho(G, -G) as a np.ndarray of shape (ft.N, ft.N).
    """
    n1, n2, n3, N = ft.n1, ft.n2, ft.n3, ft.N
    R1, R2, R3 = cell.R1, cell.R2, cell.R3
    omega = cell.omega

    ns1 = int(n1 * s / R1[0])
    ns2 = int(n2 * s / R2[1])
    ns3 = int(n3 * s / R3[2])
    nd1 = int(n1 * d1 / R1[0])
    nd2 = int(n2 * d2 / R2[1])
    nd3 = int(n3 * d3 / R3[2])
    print(ns1, ns2, ns3)
    print(nd1, nd2, nd3)
    print("effective d1, d2, d3: ",
          nd1 * R1[0] / n1, nd2 * R2[1] / n2, nd3 * R3[2] / n3)

    psi1r = np.zeros([n1, n2, n3])
# api: numpy.zeros
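# --- Added example (not part of the original module; a minimal sketch) ---
# Numerical check of the algebra in compute_rhog, using numpy's FFT as a
# stand-in for ft.forward (the real FourierTransform object is assumed to
# live elsewhere). Because f2(r) is real, f2(-G) == conj(f2(G)), which is why
# f1(G) * f2(-G) - |f3(G)|^2 equals the expression used in the code.
import numpy as np

rng = np.random.default_rng(1)
shape = (8, 8, 8)
psi1r = rng.standard_normal(shape) + 1j * rng.standard_normal(shape)
psi2r = rng.standard_normal(shape) + 1j * rng.standard_normal(shape)

f1g = np.fft.fftn(np.abs(psi1r) ** 2)
f2g = np.fft.fftn(np.abs(psi2r) ** 2)
f3g = np.fft.fftn(psi1r * np.conj(psi2r))

# On the FFT grid, f(-G) is f[(n - k) % n]; for a real f(r) it equals conj(f(G)).
def minus_g(a):
    return np.roll(np.flip(a, axis=(0, 1, 2)), 1, axis=(0, 1, 2))

assert np.allclose(minus_g(f2g), np.conj(f2g))
rhog = f1g * np.conj(f2g) - f3g * np.conj(f3g)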
from typing import List, Set, Tuple, Dict import numpy from stog.utils.checks import ConfigurationError def decode_mst(energy: numpy.ndarray, length: int, has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]: """ Note: Counter to typical intuition, this function decodes the _maximum_ spanning tree. Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for maximum spanning arboresences on graphs. Parameters ---------- energy : ``numpy.ndarray``, required. A tensor with shape (num_labels, timesteps, timesteps) containing the energy of each edge. If has_labels is ``False``, the tensor should have shape (timesteps, timesteps) instead. length : ``int``, required. The length of this sequence, as the energy may have come from a padded batch. has_labels : ``bool``, optional, (default = True) Whether the graph has labels or not. """ if has_labels and energy.ndim != 3: raise ConfigurationError("The dimension of the energy array is not equal to 3.") elif not has_labels and energy.ndim != 2: raise ConfigurationError("The dimension of the energy array is not equal to 2.") input_shape = energy.shape max_length = input_shape[-1] # Our energy matrix might have been batched - # here we clip it to contain only non padded tokens. if has_labels: energy = energy[:, :length, :length] # get best label for each edge. label_id_matrix = energy.argmax(axis=0) energy = energy.max(axis=0) else: energy = energy[:length, :length] label_id_matrix = None # get original score matrix original_score_matrix = energy # initialize score matrix to original score matrix score_matrix = numpy.array(original_score_matrix, copy=True) old_input = numpy.zeros([length, length], dtype=numpy.int32) old_output = numpy.zeros([length, length], dtype=numpy.int32) current_nodes = [True for _ in range(length)] representatives: List[Set[int]] = [] for node1 in range(length): original_score_matrix[node1, node1] = 0.0 score_matrix[node1, node1] = 0.0 representatives.append({node1}) for node2 in range(node1 + 1, length): old_input[node1, node2] = node1 old_output[node1, node2] = node2 old_input[node2, node1] = node2 old_output[node2, node1] = node1 final_edges: Dict[int, int] = {} # The main algorithm operates inplace. chu_liu_edmonds(length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives) heads = numpy.zeros([max_length], numpy.int32) if has_labels: head_type = numpy.ones([max_length], numpy.int32) else: head_type = None for child, parent in final_edges.items(): heads[child] = parent if has_labels: head_type[child] = label_id_matrix[parent, child] return heads, head_type def chu_liu_edmonds(length: int, score_matrix: numpy.ndarray, current_nodes: List[bool], final_edges: Dict[int, int], old_input: numpy.ndarray, old_output: numpy.ndarray, representatives: List[Set[int]]): """ Applies the chu-liu-edmonds algorithm recursively to a graph with edge weights defined by score_matrix. Note that this function operates in place, so variables will be modified. Parameters ---------- length : ``int``, required. The number of nodes. score_matrix : ``numpy.ndarray``, required. The score matrix representing the scores for pairs of nodes. current_nodes : ``List[bool]``, required. The nodes which are representatives in the graph. A representative at it's most basic represents a node, but as the algorithm progresses, individual nodes will represent collapsed cycles in the graph. final_edges: ``Dict[int, int]``, required. 
An empty dictionary which will be populated with the nodes which are connected in the maximum spanning tree. old_input: ``numpy.ndarray``, required. a map from an edge to its head node. Key: The edge is a tuple, and elements in a tuple could be a node or a representative of a cycle. old_output: ``numpy.ndarray``, required. representatives : ``List[Set[int]]``, required. A list containing the nodes that a particular node is representing at this iteration in the graph. Returns ------- Nothing - all variables are modified in place. """ # Set the initial graph to be the greedy best one. # Node '0' is always the root node. parents = [-1] for node1 in range(1, length): # Init the parent of each node to be the root node. parents.append(0) if current_nodes[node1]: # If the node is a representative, # find the max outgoing edge to other non-root representative, # and update its parent. max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 # Check if this solution has a cycle. has_cycle, cycle = _find_cycle(parents, length, current_nodes) # If there are no cycles, find all edges and return. if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return # Otherwise, we have a cycle so we need to remove an edge. # From here until the recursive call is the contraction stage of the algorithm. cycle_weight = 0.0 # Find the weight of the cycle. index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] # For each node in the graph, find the maximum weight incoming # and outgoing edge into the cycle. cycle_representative = cycle[0] for node in range(length): # Nodes not in the cycle. if not current_nodes[node] or node in cycle: continue in_edge_weight = float("-inf") in_edge = -1 out_edge_weight = float("-inf") out_edge = -1 for node_in_cycle in cycle: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle # Add the new edge score to the cycle weight # and subtract the edge we're considering removing. score = (cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle]) if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] # For the next recursive iteration, we want to consider the cycle as a # single node. Here we collapse the cycle into the first node in the # cycle (first node is arbitrary), set all the other nodes not be # considered in the next iteration. We also keep track of which # representatives we are considering this iteration because we need # them below to check if we're done. considered_representatives: List[Set[int]] = [] for i, node_in_cycle in enumerate(cycle): considered_representatives.append(set()) if i > 0: # We need to consider at least one # node in the cycle, arbitrarily choose # the first. 
current_nodes[node_in_cycle] = False for node in representatives[node_in_cycle]: considered_representatives[i].add(node) if i > 0: representatives[cycle_representative].add(node) chu_liu_edmonds(length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives) # Expansion stage. # check each node in cycle, if one of its representatives # is a key in the final_edges, it is the one we need. # The node we are looking for is the node which is the child # of the incoming edge to the cycle. found = False key_node = -1 for i, node in enumerate(cycle): for cycle_rep in considered_representatives[i]: if cycle_rep in final_edges: key_node = node found = True break if found: break # break the cycle. previous = parents[key_node] while previous != key_node: child = old_output[parents[previous], previous] parent = old_input[parents[previous], previous] final_edges[child] = parent previous = parents[previous] def _find_cycle(parents: List[int], length: int, current_nodes: List[bool]) -> Tuple[bool, List[int]]: """ :return: has_cycle: whether the graph has at least a cycle. cycle: a list of nodes which form a cycle in the graph. """ # 'added' means that the node has been visited. added = [False for _ in range(length)] added[0] = True cycle = set() has_cycle = False for i in range(1, length): if has_cycle: break # don't redo nodes we've already # visited or aren't considering. if added[i] or not current_nodes[i]: continue # Initialize a new possible cycle. this_cycle = set() this_cycle.add(i) added[i] = True has_cycle = True next_node = i while parents[next_node] not in this_cycle: next_node = parents[next_node] # If we see a node we've already processed, # we can stop, because the node we are # processing would have been in that cycle. # Note that in the first pass of the for loop, # every node except that the root has been assigned # a head, if there's no cycle, the while loop # will finally arrive at the root if added[next_node]: has_cycle = False break added[next_node] = True this_cycle.add(next_node) if has_cycle: original = next_node cycle.add(original) next_node = parents[original] while next_node != original: cycle.add(next_node) next_node = parents[next_node] break return has_cycle, list(cycle) def decode_mst_with_coreference( energy: numpy.ndarray, coreference: List[int], length: int, has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]: """ Note: Counter to typical intuition, this function decodes the _maximum_ spanning tree. Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for maximum spanning arboresences on graphs. Parameters ---------- energy : ``numpy.ndarray``, required. A tensor with shape (num_labels, timesteps, timesteps) containing the energy of each edge. If has_labels is ``False``, the tensor should have shape (timesteps, timesteps) instead. coreference: ``List[int]``, required. A list which maps a node to its first precedent. length : ``int``, required. The length of this sequence, as the energy may have come from a padded batch. has_labels : ``bool``, optional, (default = True) Whether the graph has labels or not. """ if has_labels and energy.ndim != 3: raise ConfigurationError("The dimension of the energy array is not equal to 3.") elif not has_labels and energy.ndim != 2: raise ConfigurationError("The dimension of the energy array is not equal to 2.") input_shape = energy.shape max_length = input_shape[-1] # Our energy matrix might have been batched - # here we clip it to contain only non padded tokens. 
if has_labels: energy = energy[:, :length, :length] # get best label for each edge. label_id_matrix = energy.argmax(axis=0) energy = energy.max(axis=0) else: energy = energy[:length, :length] label_id_matrix = None # get original score matrix original_score_matrix = energy # initialize score matrix to original score matrix score_matrix = numpy.array(original_score_matrix, copy=True) old_input = numpy.zeros([length, length], dtype=numpy.int32) old_output = numpy.zeros([length, length], dtype=numpy.int32) current_nodes = [True for _ in range(length)] representatives: List[Set[int]] = [] for node1 in range(length): original_score_matrix[node1, node1] = 0.0 score_matrix[node1, node1] = 0.0 representatives.append({node1}) for node2 in range(node1 + 1, length): old_input[node1, node2] = node1 old_output[node1, node2] = node2 old_input[node2, node1] = node2 old_output[node2, node1] = node1 final_edges: Dict[int, int] = {} # The main algorithm operates inplace. adapted_chu_liu_edmonds( length, score_matrix, coreference, current_nodes, final_edges, old_input, old_output, representatives) # Modify edges which are invalid according to coreference. _validate(final_edges, length, original_score_matrix, coreference) heads = numpy.zeros([max_length], numpy.int32) if has_labels: head_type = numpy.ones([max_length], numpy.int32) else: head_type = None for child, parent in final_edges.items(): heads[child] = parent if has_labels: head_type[child] = label_id_matrix[parent, child] return heads, head_type def adapted_chu_liu_edmonds(length: int, score_matrix: numpy.ndarray, coreference: List[int], current_nodes: List[bool], final_edges: Dict[int, int], old_input: numpy.ndarray, old_output: numpy.ndarray, representatives: List[Set[int]]): """ Applies the chu-liu-edmonds algorithm recursively to a graph with edge weights defined by score_matrix. Note that this function operates in place, so variables will be modified. Parameters ---------- length : ``int``, required. The number of nodes. score_matrix : ``numpy.ndarray``, required. The score matrix representing the scores for pairs of nodes. coreference: ``List[int]``, required. A list which maps a node to its first precedent. current_nodes : ``List[bool]``, required. The nodes which are representatives in the graph. A representative at it's most basic represents a node, but as the algorithm progresses, individual nodes will represent collapsed cycles in the graph. final_edges: ``Dict[int, int]``, required. An empty dictionary which will be populated with the nodes which are connected in the maximum spanning tree. old_input: ``numpy.ndarray``, required. a map from an edge to its head node. Key: The edge is a tuple, and elements in a tuple could be a node or a representative of a cycle. old_output: ``numpy.ndarray``, required. representatives : ``List[Set[int]]``, required. A list containing the nodes that a particular node is representing at this iteration in the graph. Returns ------- Nothing - all variables are modified in place. """ # Set the initial graph to be the greedy best one. # Node '0' is always the root node. parents = [-1] for node1 in range(1, length): # Init the parent of each node to be the root node. parents.append(0) if current_nodes[node1]: # If the node is a representative, # find the max outgoing edge to other non-root representative, # and update its parent. 
max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue # Exclude edges formed by two coreferred nodes _parent = old_input[node1, node2] _child = old_output[node1, node2] if coreference[_parent] == coreference[_child]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 # Check if this solution has a cycle. has_cycle, cycle = _find_cycle(parents, length, current_nodes) # If there are no cycles, find all edges and return. if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return # Otherwise, we have a cycle so we need to remove an edge. # From here until the recursive call is the contraction stage of the algorithm. cycle_weight = 0.0 # Find the weight of the cycle. index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] # For each node in the graph, find the maximum weight incoming # and outgoing edge into the cycle. cycle_representative = cycle[0] for node in range(length): # Nodes not in the cycle. if not current_nodes[node] or node in cycle: continue in_edge_weight = float("-inf") in_edge = -1 out_edge_weight = float("-inf") out_edge = -1 for node_in_cycle in cycle: # Exclude edges formed by two coreferred nodes. _parent = old_input[node_in_cycle, node] _child = old_output[node_in_cycle, node] if coreference[_parent] != coreference[_child]: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle # Exclude edges formed by two coreferred nodes. _parent = old_input[node, node_in_cycle] _child = old_output[node, node_in_cycle] if coreference[_parent] != coreference[_child]: # Add the new edge score to the cycle weight # and subtract the edge we're considering removing. score = (cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle]) if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] # For the next recursive iteration, we want to consider the cycle as a # single node. Here we collapse the cycle into the first node in the # cycle (first node is arbitrary), set all the other nodes not be # considered in the next iteration. We also keep track of which # representatives we are considering this iteration because we need # them below to check if we're done. considered_representatives: List[Set[int]] = [] for i, node_in_cycle in enumerate(cycle): considered_representatives.append(set()) if i > 0: # We need to consider at least one # node in the cycle, arbitrarily choose # the first. current_nodes[node_in_cycle] = False for node in representatives[node_in_cycle]: considered_representatives[i].add(node) if i > 0: representatives[cycle_representative].add(node) adapted_chu_liu_edmonds(length, score_matrix, coreference, current_nodes, final_edges, old_input, old_output, representatives) # Expansion stage. 
# check each node in cycle, if one of its representatives # is a key in the final_edges, it is the one we need. # The node we are looking for is the node which is the child # of the incoming edge to the cycle. found = False key_node = -1 for i, node in enumerate(cycle): for cycle_rep in considered_representatives[i]: if cycle_rep in final_edges: key_node = node found = True break if found: break # break the cycle. previous = parents[key_node] while previous != key_node: child = old_output[parents[previous], previous] parent = old_input[parents[previous], previous] final_edges[child] = parent previous = parents[previous] def _validate(final_edges, length, original_score_matrix, coreference): # Count how many edges have been modified by this function. modified = 0 # Make a constant used by _find_cycle. current_nodes = [True for _ in range(length)] # Group nodes by coreference. group_by_precedent = {} for node, precedent in enumerate(coreference): if precedent not in group_by_precedent: group_by_precedent[precedent] = [] group_by_precedent[precedent].append(node) # Validate parents of nodes in each group. for group in group_by_precedent.values(): # Skip if only one node in the group. if len(group) == 1: continue # Group conflicting nodes by parent. conflicts_by_parent = {} for child in group: parent = final_edges[child] if parent not in conflicts_by_parent: conflicts_by_parent[parent] = [] conflicts_by_parent[parent].append(child) # Keep the parents which have already been taken. reserved_parents = set(conflicts_by_parent.keys()) for parent, conflicts in conflicts_by_parent.items(): # Skip if no conflict. if len(conflicts) == 1: continue # Find the node that has the maximum edge with the parent. winner = max(conflicts, key=lambda _child: original_score_matrix[parent, _child]) # Modify other nodes' parents. for child in conflicts: # Skip the winner. if child == winner: continue # Sort its candidate parents by score. parent_scores = original_score_matrix[:, child] for _parent in
numpy.argsort(parent_scores)
numpy.argsort
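A note on the numpy API in this row's completion: numpy.argsort returns the indices that would sort the array in ascending order, so iterating over it visits the lowest-scoring candidate parents first, and a best-first pass needs an explicit reversal. The prompt is cut off inside "for _parent in", so the loop body is not shown; the minimal sketch below (with made-up scores, not taken from the dataset row) only demonstrates the argsort semantics the completion relies on.

import numpy

parent_scores = numpy.array([0.2, 0.9, 0.1, 0.5])
order = numpy.argsort(parent_scores)  # ascending indices: [2 0 3 1]
best_first = order[::-1]              # descending indices: [1 3 0 2]
# Walking best_first visits candidate parents from highest to lowest score.
for _parent in best_first:
    print(_parent, parent_scores[_parent])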
# -*- coding: utf-8 -*- """ Created on Fri Nov 5 01:34:00 2021 @author: yrc2 """ import biosteam as bst import biorefineries.oilcane as oc from biosteam.utils import CABBI_colors, colors from thermosteam.utils import set_figure_size, set_font, roundsigfigs from thermosteam.units_of_measure import format_units from colorpalette import Palette import matplotlib.pyplot as plt import matplotlib.patches as mpatches from warnings import warn import numpy as np import pandas as pd from matplotlib.gridspec import GridSpec from . import _variable_mockups as variables from ._variable_mockups import ( tea_monte_carlo_metric_mockups, tea_monte_carlo_derivative_metric_mockups, lca_monte_carlo_metric_mockups, lca_monte_carlo_derivative_metric_mockups, MFPP, TCI, electricity_production, natural_gas_consumption, ethanol_production, biodiesel_production, GWP_ethanol, GWP_biodiesel, GWP_electricity, GWP_ethanol_allocation, GWP_biodiesel_allocation, GWP_economic, MFPP_derivative, TCI_derivative, ethanol_production_derivative, biodiesel_production_derivative, electricity_production_derivative, natural_gas_consumption_derivative, GWP_ethanol_derivative, ) from ._load_data import ( images_folder, get_monte_carlo, spearman_file, ) import os from ._parse_configuration import format_name __all__ = ( 'plot_all', 'plot_montecarlo_main_manuscript', 'plot_breakdowns', 'plot_montecarlo_feedstock_comparison', 'plot_montecarlo_configuration_comparison', 'plot_montecarlo_agile_comparison', 'plot_montecarlo_derivative', 'plot_montecarlo_absolute', 'plot_spearman_tea', 'plot_spearman_lca', 'plot_spearman_tea_short', 'plot_spearman_lca_short', 'plot_monte_carlo_across_coordinate', 'monte_carlo_box_plot', 'plot_monte_carlo', 'plot_spearman', 'plot_configuration_breakdown', 'plot_TCI_areas_across_oil_content', 'plot_heatmap_comparison', 'plot_feedstock_conventional_comparison_kde', 'plot_feedstock_cellulosic_comparison_kde', 'plot_configuration_comparison_kde', 'plot_open_comparison_kde', 'plot_feedstock_comparison_kde', 'plot_crude_configuration_comparison_kde', 'plot_agile_comparison_kde', 'plot_separated_configuration_comparison_kde', 'area_colors', 'area_hatches', ) area_colors = { 'Feedstock handling': CABBI_colors.teal, 'Juicing': CABBI_colors.green_dirty, 'EtOH prod.': CABBI_colors.blue, 'Ethanol production': CABBI_colors.blue, 'Oil ext.': CABBI_colors.brown, 'Oil extraction': CABBI_colors.brown, 'Biod. prod.': CABBI_colors.orange, 'Biodiesel production': CABBI_colors.orange, 'Pretreatment': CABBI_colors.green, 'Wastewater treatment': colors.purple, 'CH&P': CABBI_colors.yellow, 'Co-Heat and Power': CABBI_colors.yellow, 'Utilities': colors.red, 'Storage': CABBI_colors.grey, 'HXN': colors.orange, 'Heat exchanger network': colors.orange, } area_hatches = { 'Feedstock handling': 'x', 'Juicing': '-', 'EtOH prod.': '/', 'Ethanol production': '/', 'Oil ext.': '\\', 'Oil extraction': '\\', 'Biod. 
prod.': '/|', 'Biodiesel production': '/|', 'Pretreatment': '//', 'Wastewater treatment': r'\\', 'CH&P': '', 'Co-Heat and Power': '', 'Utilities': '\\|', 'Storage': '', 'HXN': '+', 'Heat exchanger network': '+', } for i in area_colors: area_colors[i] = area_colors[i].tint(20) palette = Palette(**area_colors) letter_color = colors.neutral.shade(25).RGBn GWP_units_L = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{L}^{-1}$' GWP_units_L_small = GWP_units_L.replace('kg', 'g') CABBI_colors.orange_hatch = CABBI_colors.orange.copy(hatch='////') ethanol_over_biodiesel = bst.MockVariable('Ethanol over biodiesel', 'L/MT', 'Biorefinery') GWP_ethanol_displacement = variables.GWP_ethanol_displacement production = (ethanol_production, biodiesel_production) mc_metric_settings = { 'MFPP': (MFPP, f"MFPP\n[{format_units('USD/MT')}]", None), 'TCI': (TCI, f"TCI\n[{format_units('10^6*USD')}]", None), 'production': (production, f"Production\n[{format_units('L/MT')}]", None), 'electricity_production': (electricity_production, f"Elec. prod.\n[{format_units('kWhr/MT')}]", None), 'natural_gas_consumption': (natural_gas_consumption, f"NG cons.\n[{format_units('m^3/MT')}]", None), 'GWP_ethanol_displacement': (GWP_ethanol_displacement, "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None), 'GWP_economic': ((GWP_ethanol, GWP_biodiesel), "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None), 'GWP_energy': ((GWP_ethanol_allocation, GWP_biodiesel_allocation), "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None), } mc_comparison_settings = { 'MFPP': (MFPP, r"$\Delta$" + f"MFPP\n[{format_units('USD/MT')}]", None), 'TCI': (TCI, r"$\Delta$" + f"TCI\n[{format_units('10^6*USD')}]", None), 'production': (production, r"$\Delta$" + f"Production\n[{format_units('L/MT')}]", None), 'electricity_production': (electricity_production, r"$\Delta$" + f"Elec. 
prod.\n[{format_units('kWhr/MT')}]", None), 'natural_gas_consumption': (natural_gas_consumption, r"$\Delta$" + f"NG cons.\n[{format_units('m^3/MT')}]", None), 'GWP_ethanol_displacement': (GWP_ethanol_displacement, r"$\Delta$" + "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None), 'GWP_economic': (GWP_ethanol, r"$\Delta$" + "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None), 'GWP_energy': (GWP_ethanol_allocation, r"$\Delta$" + "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None), 'GWP_property_allocation': ((GWP_ethanol, GWP_ethanol_allocation), r"$\Delta$" + f"GWP\n[{GWP_units_L}]", None), } mc_derivative_metric_settings = { 'MFPP': (MFPP_derivative, r"$\Delta$" + format_units(r"MFPP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('USD/MT')}]", None), 'TCI': (TCI_derivative, r"$\Delta$" + format_units(r"TCI/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('10^6*USD')}]", None), 'production': ((ethanol_production_derivative, biodiesel_production_derivative), r"$\Delta$" + format_units(r"Prod./OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('L/MT')}]", None), 'electricity_production': (electricity_production_derivative, r"$\Delta$" + format_units(r"EP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('kWhr/MT')}]", None), 'natural_gas_consumption': (natural_gas_consumption_derivative, r"$\Delta$" + format_units(r"NGC/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('m^3/MT')}]", None), 'GWP_economic': (GWP_ethanol_derivative, r"$\Delta$" + r"GWP $\cdot \Delta \mathrm{OC}^{-1}$" f"\n[{GWP_units_L_small}]", 1000), } kde_metric_settings = {j[0]: j for j in mc_metric_settings.values()} kde_comparison_settings = {j[0]: j for j in mc_comparison_settings.values()} kde_derivative_settings = {j[0]: j for j in mc_derivative_metric_settings.values()} # %% Plots for publication def plot_all(): # plot_montecarlo_main_manuscript() plot_montecarlo_absolute() plot_spearman_tea() plot_spearman_lca() plot_breakdowns() def plot_montecarlo_main_manuscript(): set_font(size=8) set_figure_size(aspect_ratio=0.85) fig = plt.figure() everything = GridSpec(4, 3, fig, hspace=1.5, wspace=0.7, top=0.90, bottom=0.05, left=0.11, right=0.97) def spec2axes(spec, x, y, hspace=0, wspace=0.7, **kwargs): subspec = spec.subgridspec(x, y, hspace=hspace, wspace=wspace, **kwargs) return np.array([[fig.add_subplot(subspec[i, j]) for j in range(y)] for i in range(x)], object) gs_feedstock_comparison = everything[:2, :] gs_configuration_comparison = everything[2:, :2] gs_agile_comparison = everything[2:, 2] axes_feedstock_comparison = spec2axes(gs_feedstock_comparison, 2, 3) axes_configuration_comparison = spec2axes(gs_configuration_comparison, 2, 2) axes_agile_comparison = spec2axes(gs_agile_comparison, 2, 1) plot_montecarlo_feedstock_comparison(axes_feedstock_comparison, letters='ABCDEFG') plot_montecarlo_configuration_comparison(axes_configuration_comparison, letters='ABCDEFG') plot_montecarlo_agile_comparison(axes_agile_comparison, letters='ABCDEFG') def add_title(gs, title): ax = fig.add_subplot(gs) ax._frameon = False ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) ax.set_title( title, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold', y=1.1 ) add_title(gs_feedstock_comparison, '(I) Impact of opting to process oilcane over sugarcane') add_title(gs_configuration_comparison, '(II) Impact of cellulosic ethanol integration') add_title(gs_agile_comparison, '(III) Impact of\noilsorghum\nintegration') 
plt.show() for i in ('svg', 'png'): file = os.path.join(images_folder, f'montecarlo_main_manuscript.{i}') plt.savefig(file, transparent=True) def plot_montecarlo_feedstock_comparison(axes_box=None, letters=None, single_column=True): if single_column: width = 'half' aspect_ratio = 2.25 ncols = 1 left = 0.255 bottom = 0.05 else: width = None aspect_ratio = 0.75 left = 0.105 bottom = 0.12 ncols = 3 if axes_box is None: set_font(size=8) set_figure_size(width=width, aspect_ratio=aspect_ratio) fig, axes = plot_monte_carlo( derivative=False, absolute=False, comparison=True, tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box, labels=[ 'Direct Cogeneration', 'Integrated Co-Fermentation', # 'Direct Cogeneration', # 'Integrated Co-Fermentation', ], comparison_names=['O1 - S1', 'O2 - S2'], metrics = ['MFPP', 'TCI', 'production', 'GWP_property_allocation', 'natural_gas_consumption', 'electricity_production'], color_wheel = CABBI_colors.wheel([ 'blue_light', 'green_dirty', 'orange', 'green', 'orange', 'orange_hatch', 'grey', 'brown', ]) ) for ax, letter in zip(axes, 'ABCDEFGH' if letters is None else letters): plt.sca(ax) ylb, yub = plt.ylim() plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold') # if axes_box is None and letter in 'DH': # x = 0.5 # plt.text(x, ylb - (yub - ylb) * 0.3, # 'Impact of processing\noilcane over sugarcane', # horizontalalignment='center',verticalalignment='center', # fontsize=8) if axes_box is None: plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom) for i in ('svg', 'png'): file = os.path.join(images_folder, f'montecarlo_feedstock_comparison.{i}') plt.savefig(file, transparent=True) def plot_montecarlo_configuration_comparison(axes_box=None, letters=None, single_column=True): if single_column: width = 'half' aspect_ratio = 2.25 ncols = 1 left = 0.255 bottom = 0.05 x = 1.65 metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation', 'natural_gas_consumption', 'electricity_production'] else: width = None aspect_ratio = 0.75 left = 0.105 bottom = 0.12 ncols = 2 x = 0.58 metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation'] if axes_box is None: set_font(size=8) set_figure_size(width=width, aspect_ratio=aspect_ratio) fig, axes = plot_monte_carlo( derivative=False, absolute=False, comparison=True, tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box, labels=[ 'Oilcane', # 'Sugarcane', ], comparison_names=[ 'O2 - O1', # 'S2 - S1' ], metrics=metrics, color_wheel = CABBI_colors.wheel([ 'blue_light', 'green_dirty', 'orange', 'green', 'orange', 'orange_hatch', ]) ) for ax, letter in zip(axes, 'ABCDEF' if letters is None else letters): plt.sca(ax) ylb, yub = plt.ylim() plt.text(x, ylb + (yub - ylb) * 0.90, letter, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold') if axes_box is None: plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom) for i in ('svg', 'png'): file = os.path.join(images_folder, f'montecarlo_configuration_comparison.{i}') plt.savefig(file, transparent=True) def plot_montecarlo_agile_comparison(axes_box=None, letters=None): if axes_box is None: set_font(size=8) set_figure_size(width=3.3071, aspect_ratio=1.0) fig, axes = plot_monte_carlo( derivative=False, absolute=False, comparison=True, tickmarks=None, agile_only=True, ncols=1, labels=[ 'Direct Cogeneration', 'Integrated Co-Fermentation' ], metrics=['MFPP', 'TCI'], 
axes_box=axes_box, ) for ax, letter in zip(axes, 'AB' if letters is None else letters): plt.sca(ax) ylb, yub = plt.ylim() plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold') if axes_box is None and letter == 'B': plt.text(0.5, ylb - (yub - ylb) * 0.25, 'Impact of integrating oilsorghum\nat an agile oilcane biorefinery', horizontalalignment='center',verticalalignment='center', fontsize=8) if axes_box is None: plt.subplots_adjust(right=0.9, left=0.2, wspace=0.5, top=0.98, bottom=0.15) for i in ('svg', 'png'): file = os.path.join(images_folder, f'montecarlo_agile_comparison.{i}') plt.savefig(file, transparent=True) def plot_montecarlo_derivative(): set_font(size=8) set_figure_size( aspect_ratio=0.5, # width=3.3071, aspect_ratio=1.85 ) fig, axes = plot_monte_carlo( derivative=True, absolute=True, comparison=False, agile=False, ncols=3, # tickmarks=np.array([ # [-3, -2, -1, 0, 1, 2, 3, 4, 5], # [-9, -6, -3, 0, 3, 6, 9, 12, 15], # [-2.0, -1.5, -1.0, -0.5, 0, 0.5, 1.0, 1.5, 2], # [-16, -8, 0, 8, 16, 24, 32, 40, 48], # [-400, -300, -200, -100, 0, 100, 200, 300, 400], # [-300, -225, -150, -75, 0, 75, 150, 225, 300] # ], dtype=object), labels=['DC', 'ICF'], color_wheel = CABBI_colors.wheel([ 'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown', 'orange', ]) ) for ax, letter in zip(axes, 'ABCDEFGH'): plt.sca(ax) ylb, yub = plt.ylim() plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold') plt.subplots_adjust( hspace=0, wspace=0.7, top=0.95, bottom=0.1, left=0.12, right=0.96 ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'montecarlo_derivative.{i}') plt.savefig(file, transparent=True) def plot_montecarlo_absolute(): set_font(size=8) set_figure_size(aspect_ratio=1.05) fig, axes = plot_monte_carlo( absolute=True, comparison=False, ncols=2, expand=0.1, labels=['Sugarcane\nDC', 'Oilcane\nDC', 'Sugarcane\nICF', 'Oilcane\nICF', 'Sugarcane &\nSorghum DC', 'Oilcane &\nOil-sorghum DC', 'Sugarcane &\nSorghum ICF', 'Oilcane &\nOil-sorghum ICF'], xrot=90, color_wheel = CABBI_colors.wheel([ 'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown', 'orange', 'orange', 'green', 'orange', 'green', ]) ) for ax, letter in zip(axes, 'ABCDEFGHIJ'): plt.sca(ax) ylb, yub = plt.ylim() plt.text(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold') plt.subplots_adjust(left=0.12, right=0.95, wspace=0.40, top=0.98, bottom=0.2) for i in ('svg', 'png'): file = os.path.join(images_folder, f'montecarlo_absolute.{i}') plt.savefig(file, transparent=True) def plot_spearman_tea(with_units=None, aspect_ratio=0.8, **kwargs): set_font(size=8) set_figure_size(aspect_ratio=aspect_ratio) plot_spearman( configurations=[ 'O1', 'O1*', 'O2', 'O2*', ], labels=[ 'DC', 'Oil-sorghum int., DC', 'ICF', 'Oil-sorghum int., ICF', ], kind='TEA', with_units=with_units, cutoff=0.03, **kwargs ) plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.08) for i in ('svg', 'png'): file = os.path.join(images_folder, f'spearman_tea.{i}') plt.savefig(file, transparent=True) def plot_spearman_tea_short(**kwargs): set_font(size=8) set_figure_size(aspect_ratio=0.65, width=6.6142 * 2/3) plot_spearman( configurations=[ 'O1', 'O2', ], labels=[ 'DC', 'ICF', ], kind='TEA', with_units=False, cutoff=0.03, top=5, legend=True, 
legend_kwargs={'loc': 'upper left'}, **kwargs ) plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15) for i in ('svg', 'png'): file = os.path.join(images_folder, f'spearman_tea.{i}') plt.savefig(file, transparent=True) def plot_spearman_lca_short(with_units=False, aspect_ratio=0.65, **kwargs): set_font(size=8) set_figure_size(aspect_ratio=aspect_ratio, width=6.6142 * 2/3) plot_spearman( configurations=[ 'O1', 'O2', ], labels=[ 'DC', 'ICF', ], kind='LCA', with_units=with_units, cutoff=0.03, top=5, legend=False, **kwargs ) plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15) for i in ('svg', 'png'): file = os.path.join(images_folder, f'spearman_lca.{i}') plt.savefig(file, transparent=True) def plot_spearman_lca(with_units=None, aspect_ratio=0.65, **kwargs): set_font(size=8) set_figure_size(aspect_ratio=aspect_ratio) plot_spearman( configurations=[ 'O1', 'O1*', 'O2', 'O2*', ], labels=[ 'DC', 'Oil-sorghum int., DC', 'ICF', 'Oil-sorghum int., ICF', ], kind='LCA', with_units=with_units, cutoff=0.03, **kwargs ) plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.10) for i in ('svg', 'png'): file = os.path.join(images_folder, f'spearman_lca.{i}') plt.savefig(file, transparent=True) def plot_breakdowns(): set_font(size=8) set_figure_size(aspect_ratio=0.68) fig, axes = plt.subplots(nrows=1, ncols=2) plt.sca(axes[0]) plot_configuration_breakdown('O1', ax=axes[0], legend=False) plt.sca(axes[1]) plot_configuration_breakdown('O2', ax=axes[1], legend=True) yticks = axes[1].get_yticks() plt.yticks(yticks, ['']*len(yticks)) plt.ylabel('') plt.subplots_adjust(left=0.09, right=0.96, wspace=0., top=0.84, bottom=0.31) for ax, letter in zip(axes, ['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation']): plt.sca(ax) ylb, yub = plt.ylim() xlb, xub = plt.xlim() plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.2, letter, color=letter_color, horizontalalignment='center',verticalalignment='center', fontsize=12, fontweight='bold') for i in ('svg', 'png'): file = os.path.join(images_folder, f'breakdowns.{i}') plt.savefig(file, transparent=True) # %% Heatmap def get_fraction_in_same_direction(data, direction): return (direction * data >= 0.).sum(axis=0) / data.size def get_median(data): return roundsigfigs(np.percentile(data, 50, axis=0)) def plot_heatmap_comparison(comparison_names=None, xlabels=None): if comparison_names is None: comparison_names = oc.comparison_names columns = comparison_names if xlabels is None: xlabels = [format_name(i).replace(' ', '') for i in comparison_names] def get_data(metric, name): df = get_monte_carlo(name, metric) values = df.values return values GWP_economic, GWP_ethanol, GWP_biodiesel, GWP_electricity, GWP_crude_glycerol, = lca_monte_carlo_metric_mockups MFPP, TCI, ethanol_production, biodiesel_production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups GWP_ethanol_displacement = variables.GWP_ethanol_displacement GWP_ethanol_allocation = variables.GWP_ethanol_allocation rows = [ MFPP, TCI, ethanol_production, biodiesel_production, electricity_production, natural_gas_consumption, GWP_ethanol_displacement, GWP_ethanol_allocation, GWP_ethanol, # economic ] ylabels = [ f"MFPP\n[{format_units('USD/MT')}]", f"TCI\n[{format_units('10^6*USD')}]", f"Ethanol production\n[{format_units('L/MT')}]", f"Biodiesel production\n[{format_units('L/MT')}]", f"Elec. 
prod.\n[{format_units('kWhr/MT')}]", f"NG cons.\n[{format_units('m^3/MT')}]", "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", ] N_rows = len(rows) N_cols = len(comparison_names) data = np.zeros([N_rows, N_cols], dtype=object) data[:] = [[get_data(i, j) for j in columns] for i in rows] medians = np.zeros_like(data, dtype=float) fractions = medians.copy() for i in range(N_rows): for j in range(N_cols): medians[i, j] = x = get_median(data[i, j]) fractions[i, j] = get_fraction_in_same_direction(data[i, j], 1 if x > 0 else -1) fig, ax = plt.subplots() mbar = bst.plots.MetricBar( 'Fraction in the same direction [%]', ticks=[-100, -75, -50, -25, 0, 25, 50, 75, 100], cmap=plt.cm.get_cmap('RdYlGn') ) im, cbar = bst.plots.plot_heatmap( 100 * fractions, vmin=0, vmax=100, ax=ax, cell_labels=medians, metric_bar=mbar, xlabels=xlabels, ylabels=ylabels, ) cbar.ax.set_ylabel(mbar.title, rotation=-90, va="bottom") plt.sca(ax) ax.spines[:].set_visible(False) plt.grid(True, 'major', 'both', lw=1, color='w', ls='-') # %% KDE def plot_kde(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None, xbox_kwargs=None, ybox_kwargs=None, top_left='', top_right='Tradeoff', bottom_left='Tradeoff', bottom_right=''): set_font(size=8) set_figure_size(width='half', aspect_ratio=1.20) Xi, Yi = [i.index for i in metrics] df = oc.get_monte_carlo(name, metrics) y = df[Yi].values x = df[Xi].values sX, sY = [kde_comparison_settings[i] for i in metrics] _, xlabel, fx = sX _, ylabel, fy = sY if fx: x *= fx if fy: y *= fy ax = bst.plots.plot_kde( y=y, x=x, xticks=xticks, yticks=yticks, xticklabels=True, yticklabels=True, xbox_kwargs=xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn), ybox_kwargs=ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn), ) plt.sca(ax) plt.xlabel(xlabel.replace('\n', ' ')) plt.ylabel(ylabel.replace('\n', ' ')) bst.plots.plot_quadrants() xlb, xub = plt.xlim() ylb, yub = plt.ylim() xpos = lambda x: xlb + (xub - xlb) * x # xlpos = lambda x: xlb * (1 - x) ypos = lambda y: ylb + (yub - ylb) * y y_mt_0 = y > 0 y_lt_0 = y < 0 x_mt_0 = x > 0 x_lt_0 = x < 0 xleft = 0.02 xright = 0.98 ytop = 0.94 ybottom = 0.02 if yub > 0. and xlb < 0.: if top_left.endswith('()'): p = (y_mt_0 & x_lt_0).sum() / y.size top_left = f"{p:.0%} {top_left.strip('()')}" plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn, horizontalalignment='left', verticalalignment='top', fontsize=10, fontweight='bold', zorder=10) if ylb < 0. and xlb < 0.: if bottom_left.endswith('()'): p = (y_lt_0 & x_lt_0).sum() / y.size bottom_left = f"{p:.0%} {bottom_left.strip('()')}" plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn, horizontalalignment='left', verticalalignment='bottom', fontsize=10, fontweight='bold', zorder=10) if yub > 0. and xub > 0.: if top_right.endswith('()'): p = (y_mt_0 & x_mt_0).sum() / y.size top_right = f"{p:.0%} {top_right.strip('()')}" plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn, horizontalalignment='right', verticalalignment='top', fontsize=10, fontweight='bold', zorder=10) if ylb < 0. 
and xub > 0.: if bottom_right.endswith('()'): p = (y_lt_0 & x_mt_0).sum() / y.size bottom_right = f"{p:.0%} {bottom_right.strip('()')}" plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn, horizontalalignment='right', verticalalignment='bottom', fontsize=10, fontweight='bold', zorder=10) plt.subplots_adjust( hspace=0.05, wspace=0.05, top=0.98, bottom=0.15, left=0.15, right=0.98, ) def plot_kde_2d(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None, top_left='', top_right='Tradeoff', bottom_left='Tradeoff', bottom_right='', xbox_kwargs=None, ybox_kwargs=None, titles=None): set_font(size=8) set_figure_size(aspect_ratio=0.65) if isinstance(name, str): name = (name,) Xi, Yi = [i.index for i in metrics] dfs = [oc.get_monte_carlo(i, metrics) for i in name] sX, sY = [kde_comparison_settings[i] for i in metrics] _, xlabel, fx = sX _, ylabel, fy = sY xs = np.array([[df[Xi] for df in dfs]]) ys = np.array([[df[Yi] for df in dfs]]) if fx: xs *= fx if fy: ys *= fy axes = bst.plots.plot_kde_2d( xs=xs, ys=ys, xticks=xticks, yticks=yticks, xticklabels=[True, True], yticklabels=[True, True], xbox_kwargs=2*[xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn)], ybox_kwargs=[ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn)], ) M, N = axes.shape xleft = 0.02 xright = 0.98 ytop = 0.94 ybottom = 0.02 for i in range(M): for j in range(N): ax = axes[i, j] plt.sca(ax) if i == M - 1: plt.xlabel(xlabel.replace('\n', ' ')) if j == 0: plt.ylabel(ylabel.replace('\n', ' ')) bst.plots.plot_quadrants() xlb, xub = plt.xlim() ylb, yub = plt.ylim() xpos = lambda x: xlb + (xub - xlb) * x # xlpos = lambda x: xlb * (1 - x) ypos = lambda y: ylb + (yub - ylb) * y df = dfs[j] x = df[Xi] y = df[Yi] y_mt_0 = y > 0 y_lt_0 = y < 0 x_mt_0 = x > 0 x_lt_0 = x < 0 if yub > 0. and xlb < 0. and top_left: if top_left.endswith('()'): p = (y_mt_0 & x_lt_0).sum() / y.size top_left = f"{p:.0%} {top_left.strip('()')}" replacement = '()' else: replacement = None plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn, horizontalalignment='left', verticalalignment='top', fontsize=10, fontweight='bold', zorder=10) top_left = replacement if ylb < 0. and xlb < 0. and bottom_left: if bottom_left.endswith('()'): p = (y_lt_0 & x_lt_0).sum() / y.size bottom_left = f"{p:.0%} {bottom_left.strip('()')}" replacement = '()' else: replacement = None plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn, horizontalalignment='left', verticalalignment='bottom', fontsize=10, fontweight='bold', zorder=10) bottom_left = replacement if yub > 0. and xub > 0. and top_right: if top_right.endswith('()'): p = (y_mt_0 & x_mt_0).sum() / y.size top_right = f"{p:.0%} {top_right.strip('()')}" replacement = '()' else: replacement = None plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn, horizontalalignment='right', verticalalignment='top', fontsize=10, fontweight='bold', zorder=10) top_right = replacement if ylb < 0. and xub > 0. 
and bottom_right: if bottom_right.endswith('()'): p = (y_lt_0 & x_mt_0).sum() / y.size bottom_right = f"{p:.0%} {bottom_right.strip('()')}" replacement = '()' else: replacement = None plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn, horizontalalignment='right', verticalalignment='bottom', fontsize=10, fontweight='bold', zorder=10) bottom_right = replacement plt.subplots_adjust( hspace=0, wspace=0, top=0.98, bottom=0.15, left=0.1, right=0.98, ) if titles: plt.subplots_adjust( top=0.90, ) for ax, letter in zip(axes[0, :], titles): plt.sca(ax) ylb, yub = plt.ylim() xlb, xub = plt.xlim() plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.17, letter, color=letter_color, horizontalalignment='center', verticalalignment='center', fontsize=12, fontweight='bold') def plot_feedstock_conventional_comparison_kde(): plot_kde( 'O1 - S1', yticks=[-20, -10, 0, 10, 20, 30, 40], xticks=[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06], top_left='Oilcane Favored', bottom_right='Sugarcane\nFavored', top_right='GWP\nTradeoff()', bottom_left='MFPP\nTradeoff()', ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'feedstock_conventional_comparison_kde.{i}') plt.savefig(file, transparent=True) def plot_feedstock_cellulosic_comparison_kde(): plot_kde( 'O2 - S2', yticks=[-40, -20, 0, 20, 40, 60, 80], xticks=[-5, -4, -3, -2, -1, 0], top_left='Oilcane Favored', bottom_right='Sugarcane Favored', top_right='GWP\nTradeoff()', bottom_left='MFPP\nTradeoff()', fx=1000., ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'feedstock_cellulosic_comparison_kde.{i}') plt.savefig(file, transparent=True) def plot_feedstock_comparison_kde(): plot_kde_2d( ('O1 - S1', 'O2 - S2'), yticks=[[-10, 0, 10, 20, 30, 40, 50, 60]], xticks=[[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06], [-2.0, -1.5, -1, -0.5, 0., 0.5, 1.0]], top_right='GWP\nTradeoff()', bottom_left='MFPP\nTradeoff()', top_left='Oilcane\nFavored()', bottom_right='\nSugarcane\nFavored()', titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'], ) plt.subplots_adjust( wspace=0, ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'feedstock_comparison_kde.{i}') plt.savefig(file, transparent=True) def plot_configuration_comparison_kde(): plot_kde( 'O1 - O2', yticks=[-20, 0, 20, 40, 60], xticks=[-2, -1.5, -1, -0.5, 0, 0.5, 1], top_right='GWP\nTradeoff()', bottom_left='MFPP\nTradeoff()', top_left='DC Favored()', bottom_right='ICF\nFavored()', ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'configuration_comparison_kde.{i}') plt.savefig(file, transparent=True) def plot_separated_configuration_comparison_kde(): plot_kde_2d( ('O1', 'O2'), yticks=[[-20, 0, 20, 40, 60]], xticks=[ [0, 0.5, 1, 1.5], [0, 2, 4, 6, 8, 10] ], top_right='GWP\nTradeoff()', bottom_left='MFPP\nTradeoff()', top_left='DC Favored()', bottom_right='ICF\nFavored()', ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'separated_configuration_comparison_kde.{i}') plt.savefig(file, transparent=True) def plot_crude_configuration_comparison_kde(): plot_kde_2d( ('O1 - O3', 'O2 - O4'), yticks=[[-12, 0, 12, 24, 36, 48]], xticks=[ [-0.5, -0.4, -0.3, -0.2, -0.1, 0], [-1, -0.8, -0.6, -0.4, -0.2, 0] ], top_right='GWP\nTradeoff()', bottom_left='MFPP\nTradeoff()', top_left='Biodiesel\nProduction Favored()', bottom_right='Crude Oil\nProduction Favored()', titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'], ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'crude_configuration_comparison_kde.{i}') 
plt.savefig(file, transparent=True) def plot_agile_comparison_kde(): plot_kde_2d( ('O1* - O1', 'O2* - O2'), metrics=[TCI, MFPP], yticks=[[0, 3, 6, 9, 12, 15]], xticks=2*[[-150, -125, -100, -75, -50, -25, 0]], top_right='TCI-Tradeoff()', bottom_left='MFPP\nTradeoff()', top_left='Sorghum\nIntegration Favored()', bottom_right='Cane-only\nFavored()', xbox_kwargs=dict(light=CABBI_colors.green_dirty.RGBn, dark=CABBI_colors.green_dirty.shade(60).RGBn), titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'], ) for i in ('svg', 'png'): file = os.path.join(images_folder, f'agile_conventional_comparison_kde.{i}') plt.savefig(file, transparent=True) def plot_open_comparison_kde(overlap=False): metrics = [MFPP, TCI, GWP_ethanol, biodiesel_production] df_conventional_oc = oc.get_monte_carlo('O1', metrics) df_cellulosic_oc = oc.get_monte_carlo('O2', metrics) df_conventional_sc = oc.get_monte_carlo('S1', metrics) df_cellulosic_sc = oc.get_monte_carlo('S2', metrics) MFPPi = MFPP.index TCIi = TCI.index if overlap: ys = np.zeros([1, 2], dtype=object) xs = np.zeros([1, 2], dtype=object) ys[0, 0] = (df_conventional_oc[MFPPi], df_cellulosic_oc[MFPPi]) ys[0, 1] = (df_conventional_sc[MFPPi], df_cellulosic_sc[MFPPi]) xs[0, 0] = (df_conventional_oc[TCIi], df_cellulosic_oc[TCIi]) xs[0, 1] = (df_conventional_sc[TCIi], df_cellulosic_sc[TCIi]) yticks = [[-30, -15, 0, 15, 30, 45, 60, 75]] xticks = 2*[[200, 300, 400, 500, 600]] else: ys = np.array([ [df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]], [df_cellulosic_oc[MFPPi], df_cellulosic_sc[MFPPi]] ]) xs = np.array([ [df_conventional_oc[TCIi], df_conventional_sc[TCIi]], [df_cellulosic_oc[TCIi], df_cellulosic_sc[TCIi]] ]) yticks = 2*[[-30, -15, 0, 15, 30, 45, 60, 75]] xticks = 2*[[200, 300, 400, 500, 600]] bst.plots.plot_kde_2d( ys=ys, xs=xs, xticks=xticks, yticks=yticks, xbox_kwargs=[dict(position=1), dict(position=1)], ybox_kwargs=[dict(position=0), dict(position=0)], ) #%% General Monte Carlo box plots def plot_monte_carlo_across_coordinate(coordinate, data, color_wheel): if isinstance(data, list): return [plot_monte_carlo_across_coordinate(coordinate, i, color_wheel) for i in data] else: color = color_wheel.next() return bst.plots.plot_montecarlo_across_coordinate( coordinate, data, light_color=color.tint(50).RGBn, dark_color=color.shade(50).RGBn, ) def monte_carlo_box_plot(data, positions, light_color, dark_color, width=None, hatch=None, outliers=False, **kwargs): if width is None: width = 0.8 if outliers: flierprops = {'marker':'D', 'markerfacecolor': light_color, 'markeredgecolor': dark_color, 'markersize':3} else: flierprops = {'marker':''} bp = plt.boxplot( x=data, positions=positions, patch_artist=True, widths=width, whis=[5, 95], boxprops={'facecolor':light_color, 'edgecolor':dark_color}, medianprops={'color':dark_color, 'linewidth':1.5}, flierprops=flierprops, **kwargs ) if hatch: for box in bp['boxes']: box.set(hatch = hatch) def plot_monte_carlo(derivative=False, absolute=True, comparison=True, configuration_names=None, comparison_names=None, metrics=None, labels=None, tickmarks=None, agile=True, ncols=1, expand=None, step_min=None, agile_only=False, xrot=None, color_wheel=None, axes_box=None): if derivative: default_configuration_names = ['O1', 'O2'] default_comparison_names = ['O2 - O1'] metric_info = mc_derivative_metric_settings default_metrics = list(metric_info) else: default_configuration_names = oc.configuration_names[:-2] default_comparison_names = oc.comparison_names if comparison: metric_info = mc_comparison_settings else: 
metric_info = mc_metric_settings if agile_only: default_configuration_names = [i for i in default_configuration_names if '*' in i] default_comparison_names = [i for i in default_comparison_names if '*' in i] default_metrics = ['MFPP', 'TCI', 'production'] else: default_metrics = list(metric_info) if configuration_names is None: configuration_names = default_configuration_names if comparison_names is None: comparison_names = default_comparison_names if metrics is None: metrics = default_metrics combined = absolute and comparison if agile_only: configuration_names = [i for i in configuration_names if '*' in i] comparison_names = [i for i in comparison_names if '*' in i] elif not agile: configuration_names = [i for i in configuration_names if '*' not in i] comparison_names = [i for i in comparison_names if '*' not in i] if combined: columns = configurations = configuration_names + comparison_names elif absolute: columns = configurations = configuration_names elif comparison: columns = configurations = comparison_names else: columns = configurations = [] rows, ylabels, factors = zip(*[metric_info[i] for i in metrics]) factors = [(i, j) for i, j in enumerate(factors) if j is not None] if color_wheel is None: color_wheel = CABBI_colors.wheel() N_rows = len(rows) if axes_box is None: fig, axes_box = plt.subplots(ncols=ncols, nrows=int(round(N_rows / ncols))) plt.subplots_adjust(wspace=0.45) else: fig = None axes = axes_box.transpose() axes = axes.flatten() N_cols = len(columns) xtext = labels or [format_name(i).replace(' ', '') for i in configurations] N_marks = len(xtext) xticks = tuple(range(N_marks)) def get_data(metric, name): try: df = get_monte_carlo(name, metric) except: return np.zeros([1, 1]) else: values = df.values return values def plot(arr, position): if arr.ndim == 2: N = arr.shape[1] width = 0.618 / N boxwidth = 0.618 / (N + 1/N) plots = [] for i in range(N): color = color_wheel.next() boxplot = monte_carlo_box_plot( data=arr[:, i], positions=[position + (i-(N-1)/2)*width], light_color=color.RGBn, dark_color=color.shade(60).RGBn, width=boxwidth, hatch=getattr(color, 'hatch', None), ) plots.append(boxplot) return plots else: color = color_wheel.next() return monte_carlo_box_plot( data=arr, positions=[position], light_color=color.RGBn, dark_color=color.shade(60).RGBn, width=0.618, ) data = np.zeros([N_rows, N_cols], dtype=object) data[:] = [[get_data(i, j) for j in columns] for i in rows] for i, j in factors: data[i, :] *= j if tickmarks is None: tickmarks = [ bst.plots.rounded_tickmarks_from_data( i, step_min=step_min, N_ticks=8, lb_max=0, center=0, f=roundsigfigs, expand=expand, f_min=lambda x: np.percentile(x, 5), f_max=lambda x: np.percentile(x, 95), ) for i in data ] x0 = len(configuration_names) - 0.5 xf = len(columns) - 0.5 for i in range(N_rows): ax = axes[i] plt.sca(ax) if combined: bst.plots.plot_vertical_line(x0) ax.axvspan(x0, xf, color=colors.purple_tint.tint(60).RGBn) plt.xlim(-0.5, xf) for j in range(N_cols): color_wheel.restart() for i in range(N_rows): ax = axes[i] plt.sca(ax) plot(data[i, j], j) plt.ylabel(ylabels[i]) for i in range(N_rows): ax = axes[i] plt.sca(ax) yticks = tickmarks[i] plt.ylim([yticks[0], yticks[1]]) if yticks[0] < 0.: bst.plots.plot_horizontal_line(0, color=CABBI_colors.black.RGBn, lw=0.8, linestyle='--') try: xticklabels = xtext if ax in axes_box[-1] else [] except: xticklabels = xtext if i == N_rows - 1 else [] bst.plots.style_axis(ax, xticks = xticks, yticks = yticks, xticklabels= xticklabels, ytick0=False, ytickf=False, offset_xticks=True, 
xrot=xrot, ) if fig is None: fig = plt.gcf() else: plt.subplots_adjust(hspace=0) fig.align_ylabels(axes) return fig, axes #%% Spearman def plot_spearman(configurations, labels=None, metric=None, kind=None, with_units=None, legend=None, legend_kwargs=None, **kwargs): if kind is None: kind = 'TEA' if with_units is None: with_units = True if legend is None: legend = True if metric is None: if kind == 'TEA': metric = MFPP metric_name = metric.name elif kind == 'LCA': metric = GWP_economic metric_name = r'GWP$_{\mathrm{economic}}$' else: raise ValueError(f"invalid kind '{kind}'") else: if metric == 'MFPP': metric = MFPP elif metric == 'GWP': metric = GWP_economic metric_name = metric.name stream_price = format_units('USD/L') USD_MT = format_units('USD/MT') ng_price = format_units('USD/m^3') electricity_price = format_units('USD/kWhr') operating_days = format_units('day/yr') capacity = format_units('10^6 MT/yr') titer = format_units('g/L') productivity = format_units('g/L/hr') material_GWP = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$' feedstock_GWP = '$\\mathrm{g} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$' index, ignored_list = zip(*[ ('Crushing mill oil recovery [60 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*']), ('Saccharification oil recovery [70 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*', 'O1', 'O1*']), (f'Cane operating days [120 $-$ 180 {operating_days}]', []), (f'Sorghum operating days [30 $-$ 60 {operating_days}]', ['S2', 'S1', 'O1', 'O2']), (f'Crushing capacity [1.2 $-$ 2.0 {capacity}]', []), (f'Ethanol price [0.269, 0.476, 0.758 {stream_price}]', []), (f'Relative biodiesel price [0.0819, 0.786, 1.09 {stream_price}]', []), (f'Natural gas price [0.105, 0.122, 0.175 {ng_price}]', ['S1', 'O1', 'S1*', 'O1*']), (f'Electricity price [0.0583, 0.065, 0.069 {electricity_price}]', ['S2', 'O2', 'S2*', 'O2*']), ('IRR [10 $-$ 15 %]', []), (f'Crude glycerol price [100 $-$ 220 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']), (f'Pure glycerol price [488 $-$ 812 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']), ('Saccharification reaction time [54 $-$ 90 hr]', ['S1', 'O1', 'S1*', 'O1*']), (f'Cellulase price [159 $-$ 265 {USD_MT}]', ['S1', 'O1', 'S1*', 'O1*']), ('Cellulase loading [1.5 $-$ 2.5 wt. % cellulose]', ['S1', 'O1', 'S1*', 'O1*']), ('PTRS base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']), # ('Pretreatment reactor system base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']), ('Cane glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']), ('Sorghum glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']), ('Cane xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']), ('Sorghum xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']), ('Glucose to ethanol yield [90 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']), ('Xylose to ethanol yield [50 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']), (f'Titer [65 $-$ 130 {titer}]', ['S1', 'O1', 'S1*', 'O1*']), (f'Productivity [1.0 $-$ 2.0 {productivity}]', ['S1', 'O1', 'S1*', 'O1*']), ('Cane PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']), ('Sorghum PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']), ('Cane FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']), ('Sorghum FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']), ('Cane oil content [5 $-$ 15 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*']), ('Relative sorghum oil content [-3 $-$ 0 dry wt. 
%]', ['S2', 'S1', 'S2*', 'S1*', 'O2', 'O1']), ('TAG to FFA conversion [17.25 $-$ 28.75 % theoretical]', ['S1', 'O1', 'S1*', 'O1*']), # TODO: change lower upper values to baseline +- 10% (f'Feedstock GWPCF [26.3 $-$ 44.0 {feedstock_GWP}]', ['S1', 'S2', 'S1*', 'S2*']), (f'Methanol GWPCF [0.338 $-$ 0.563 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']), (f'Pure glycerine GWPCF [1.25 $-$ 2.08 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']), (f'Cellulase GWPCF [6.05 $-$ 10.1 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']), (f'Natural gas GWPCF [0.297 $-$ 0.363 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']), ]) if not with_units: index = [i.split(' [')[0] for i in index] ignored_dct = { 'S1': [], 'O1': [], 'S2': [], 'O2': [], 'S1*': [], 'O1*': [], 'S2*': [], 'O2*': [], } for i, ignored in enumerate(ignored_list): for name in ignored: ignored_dct[name].append(i) index_name = index[i] if kind == 'LCA': for term in ('cost', 'price', 'IRR', 'time', 'capacity'): if term in index_name: for name in ignored_dct: ignored_dct[name].append(i) break elif kind == 'TEA': if 'GWP' in index_name: for name in ignored_dct: ignored_dct[name].append(i) else: raise ValueError(f"invalid kind '{kind}'") rhos = [] for name in configurations: file = spearman_file(name) try: df = pd.read_excel(file, header=[0, 1], index_col=[0, 1]) except: warning = RuntimeWarning(f"file '{file}' not found") warn(warning) continue s = df[metric.index] s.iloc[ignored_dct[name]] = 0. rhos.append(s) color_wheel = [CABBI_colors.orange, CABBI_colors.green_soft, CABBI_colors.blue, CABBI_colors.brown] fig, ax = bst.plots.plot_spearman_2d(rhos, index=index, color_wheel=color_wheel, name=metric_name, **kwargs) if legend: if legend_kwargs is None: legend_kwargs = {'loc': 'lower left'} plt.legend( handles=[ mpatches.Patch( color=color_wheel[i].RGBn, label=labels[i] if labels else format_name(configurations[i]) ) for i in range(len(configurations)) ], **legend_kwargs, ) return fig, ax # %% Other def plot_configuration_breakdown(name, across_coordinate=False, **kwargs): oc.load(name) if across_coordinate: return bst.plots.plot_unit_groups_across_coordinate( oc.set_cane_oil_content, [5, 7.5, 10, 12.5], 'Feedstock oil content [dry wt. %]', oc.unit_groups, colors=[area_colors[i.name].RGBn for i in oc.unit_groups], hatches=[area_hatches[i.name] for i in oc.unit_groups], **kwargs, ) else: def format_total(x): if x < 1e3: return format(x, '.3g') else: x = int(x) n = 10 ** (len(str(x)) - 3) value = int(round(x / n) * n) return format(value, ',') for i in oc.unit_groups: if i.name == 'EtOH prod.': i.name = 'Ethanol production' elif i.name == 'Oil ext.': i.name = 'Oil extraction' elif i.name == 'Biod. prod.': i.name = 'Biodiesel production' i.metrics[0].name = 'Inst. eq.\ncost' i.metrics[3].name = 'Elec.\ncons.' 
i.metrics[4].name = 'Mat.\ncost' return bst.plots.plot_unit_groups( oc.unit_groups, colors=[area_colors[i.name].RGBn for i in oc.unit_groups], hatches=[area_hatches[i.name] for i in oc.unit_groups], format_total=format_total, fraction=True, legend_kwargs=dict( loc='lower center', ncol=4, bbox_to_anchor=(0, -0.52), labelspacing=1.5, handlelength=2.8, handleheight=1, scale=0.8, ), **kwargs, ) def plot_TCI_areas_across_oil_content(configuration='O2'): oc.load(configuration) data = {i.name: [] for i in oc.unit_groups} increasing_areas = [] decreasing_areas = [] oil_contents = np.linspace(5, 15, 10) for i in oil_contents: oc.set_cane_oil_content(i) oc.sys.simulate() for i in oc.unit_groups: data[i.name].append(i.get_installed_cost()) for name, group_data in data.items(): lb, *_, ub = group_data if ub > lb: increasing_areas.append(group_data) else: decreasing_areas.append(group_data) increasing_values =
np.sum(increasing_areas, axis=0)
numpy.sum
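A note on the numpy API in this row's completion: numpy.sum with axis=0 stacks the inner sequences into a 2-D array and totals down the columns, which is what plot_TCI_areas_across_oil_content needs to turn a list of per-area installed-cost trajectories into one combined trajectory. A minimal sketch with invented cost values (not results from the biorefinery model):

import numpy as np

# Two toy installed-cost trajectories across three oil contents.
increasing_areas = [
    [10.0, 12.0, 15.0],
    [5.0, 6.0, 8.0],
]
increasing_values = np.sum(increasing_areas, axis=0)
print(increasing_values)  # [15. 18. 23.] -- one combined value per oil content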
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jan 8 13:37:56 2020 @author: karliskanders Last updated on 01/04/2020 """ import leidenalg as la import pandas as pd import numpy as np from sklearn.metrics.cluster import adjusted_mutual_info_score as ami_score from sklearn.metrics import confusion_matrix from matplotlib import pyplot as plt import seaborn as sns import igraph as ig from time import time import os class ConsensusClustering: """ Class for determining stable clustering of data by using a 3-step process. First, an ensemble of clustering results is generated by repeatedly applying a clustering algorithm many times (step 1). Then, the ensemble is used to define new edge weights between the graph nodes based on the data point co-clustering occurrences. These weights are then used to generate another "consensus ensemble", which in practice is very stable and exhibits only minor variations between different clustering runs (step 2). To decide which one of the partitions among the "consensus ensemble" should be designated as the final consensus partition, we use adjusted mutual information to compare all partitions within the consensus ensemble, and choose the one which agrees the best with all of the other partitions (step 3). Presently, we use the Leiden community detection algorithm for clustering the graph into communities. However, this class can be easily adapted to use other graph-based clustering algorithms. The consensus clustering approach used here is an adapted version of the intuitively simple but well-performing "Ensemble Clustering for Graphs" method by <NAME> (see https://arxiv.org/abs/1809.05578). """ def __init__( self, graph, N=20, N_consensus=10, verbose=True, seed=None, edge_bootstrap=False ): """ Parameters ---------- graph (igraph.Graph): Graph object that will be used for finding graph communities. N (int): Ensemble size for the first clustering step (normally use 500-1000). N_consensus (int): Ensemble size for the consensus clustering step. verbose (boolean): Determines whether user is informed about the progress regarding the intermediate steps of the clustering procedure. seed (int): Seed for the random number generator; useful for reproducing the exact same clustering result. This seed is then used to generate all other random seeds for each repeated clustering run. edge_bootstrap (boolean): Determines whether edge bootstrapping is used for generating the clustering ensemble. """ self.graph = graph self.directed = graph.is_directed() self.N = N self.N_consensus = N_consensus self.v = verbose self.edge_bootstrap = edge_bootstrap self.w_min = 0.05 # Hard-coded parameter for consensus clustering (step 2) from Poulin & # Theberge publication self._ensemble = None # List of lists containing the ensemble of step 1 clustering results self._consensus_ensemble = None # List of lists containing the ensemble of step 2 clustering results self._COOC = None # Clustering co-occurrence matrix self._consensus_partition = None # Final consensus clustering partition # Manage random seeds if type(seed) != type(None): print("Setting random seeds...") np.random.seed(seed) self.ensemble_seeds = np.random.randint(100000000, size=N) self.consensus_ensemble_seed = np.random.randint(100000000) else: self.ensemble_seeds = None self.consensus_ensemble_seed = None @property def ensemble(self): """ List of clustering results (pertaining to step 1 of the clustering procedure), where each clustering result is a list of integers. 
These integers correspond to cluster labels. """ if self._ensemble is None: # Generate ensemble of self.N partitions self._ensemble = self.create_ensemble(self.N, weights="weight") # self.clustering_AMI, _ = self.ensemble_AMI(self.ensemble, v=self.v) return self._ensemble @property def COOC(self): """ Co-clustering occurrence matrix: element (i,j) of this matrix indicates how many times nodes i and j were clustered together. """ if self._COOC is None: # Calculate the co-occurrence matrix from the ensemble self._COOC = self.cooccurrence_matrix(self.ensemble) return self._COOC @property def consensus_ensemble(self): """ List of consensus clustering results (pertaining to step 2 of the clustering procedure) where each clustering result is a list of integers. These integers correspond to cluster labels. """ if self._consensus_ensemble is None: # Use the co-occurrence matrix values for consensus clustering weights A = (self.COOC != 0).astype(int) if self.v: print("Using co-occurrence matrix to do consensus clustering...") # Create a new graph and find communities in this new graph g_cooc = build_graph(self.COOC / self.N, kNN=A) clust_cooc = ConsensusClustering( g_cooc, N=self.N_consensus, seed=self.consensus_ensemble_seed ) self._consensus_ensemble = clust_cooc.create_ensemble() return self._consensus_ensemble def load_ensemble(self, ensemble, consensus=False): """ This method can be used to load an external ensemble. For example, you might have stored an ensemble of clustering results from a previous analysis and would now like to recalculate the consensus partition. Parameters ---------- ensemble (list of lists of int): List of clustering results, where each clustering result is a list of integers. These integers correspond to cluster labels. consensus (boolean): Determines whether the ensemble should be treated as the initial ensemble (from step 1) or the consensus ensemble (from step 2). """ if not consensus: self._ensemble = ensemble else: self._consensus_ensemble = ensemble def create_ensemble(self, N=None, weights="weight"): """ Generates ensemble of clustering partitions by repeatedly applying a clustering algorithm many times. Parameters ---------- N (int OR None): Ensemble size for the first clustering step. If N==None, use the class property self.N weights (string OR None): Edge property to use for the community detection Returns ------- ensemble (list of lists of int): List of clustering results, where each clustering result is a list of integers. These integers correspond to cluster labels. 
""" if N is None: N = self.N ensemble = [] if self.v: print(f"Generating an ensemble with {N} partitions...") for i in range(N): # Choose random seed for the clustering if self.ensemble_seeds is not None: ensemble_seed = self.ensemble_seeds[i] else: ensemble_seed = None # Bootstrapping by removing edges if self.edge_bootstrap == True: graph_ = self.graph.copy() rand_numbers = np.random.rand(len(graph_.es)) edge_weights = graph_.es[weights] # Normalise the edge weights between 0 and 1 edge_weights = np.array(edge_weights) / np.max(edge_weights) # Remove edges based on a probability that is proportional to their weight # (one might want to parameterise this further to tweak the edge removal) id_to_delete = np.where(rand_numbers > edge_weights)[0] graph_.delete_edges(list(id_to_delete)) else: graph_ = self.graph # Community detection p = la.find_partition( graph_, weights=weights, partition_type=la.ModularityVertexPartition, seed=ensemble_seed, ) ensemble.append(p.membership) if self.v: print("x", end="") if self.v: print("") return ensemble @staticmethod def cooccurrence_matrix(ensemble): """ Create the co-clustering occurrence matrix (also called 'cooccurrence matrix'); This can be quite slow for large graphs with ~10K nodes and probably could be optimized, e.g., with numba. Parameters ---------- ensemble (list of lists of int): List of clustering results, where each clustering result is a list of integers. These integers correspond to cluster labels. """ n = len(ensemble[0]) COOC = np.zeros((n, n)) # For each clustering result in the ensemble for i, p in enumerate(ensemble): membership = p # Use pandas to find node pairs with the same cluster labels membership_df = pd.DataFrame( data={"id": list(range(len(membership))), "cluster": membership} ) cooc = membership_df.merge(right=membership_df, on="cluster") cooc = cooc[cooc.id_x < cooc.id_y] # For each node pair with the same cluster labels, add 1 to the # co-clustering occurrence matrix COOC[cooc.id_x.values, cooc.id_y.values] += 1 COOC = COOC + np.triu(COOC).T return COOC @property def consensus_partition(self): """ Final consensus partition of the clustering procedure """ if self._consensus_partition is None: self.consensus_communities() return self._consensus_partition def consensus_communities(self): """ Method for finding the consensus clustering partition, i.e., for the steps 2-3 of the clustering procedure. """ # Measure the stability of the consensus ensemble. 
If the consensus ensemble # has not been generated yet, it will be generated by calling self.consensus_ensemble self.consensus_AMI, AMI_matrix = self.ensemble_AMI( self.consensus_ensemble, v=self.v ) # Take "the most agreeable" partition as the final consensus clustering # partition (i.e., step 3) mean_ami = np.mean(AMI_matrix, axis=1) most_agreeable = np.argsort(mean_ami)[-1] self._consensus_partition = self.consensus_ensemble[most_agreeable] # Describe the final consensus clustering partition char = self.describe_partition(self._consensus_partition, self.v) self.n = char["n"] self.sizes = char["sizes"] @staticmethod def describe_partition(partition, verbose=True): """ Describes the number of clusters and the number of nodes in each cluster """ partition = np.array(partition) clusters = np.unique(partition) n = len(clusters) sizes = [0] * n for c in range(n): sizes[c] = np.sum(partition == c) if verbose: print(f"Clustering with {len(partition)} nodes and {n} clusters.") return {"n": n, "sizes": sizes} @staticmethod def ensemble_AMI(P, v=True): """ Calculates pairwise adjusted mutual information (AMI) scores across the clustering ensemble. Parameters ---------- P (list of lists of int): Clustering ensemble, i.e., a list of clustering results, where each clustering result is a list of integers. These integers correspond to cluster labels. v (boolean): Determines whether information about the results is printed. Returns ------- ami_avg (float): Average adjusted mutual information across the ensemble ami_matrix (numpy.ndarray): The complete matrix with adjusted mutual information scores between all pairs of clustering results """ # If P is not a list of lists but a list of partition objects instead, extract the lists of memberships if type(P[0]) == la.VertexPartition.ModularityVertexPartition: P = [e.membership for e in P] ami_matrix = np.zeros((len(P), len(P))) for i in range(0, len(P)): for j in range(i, len(P)): ami_matrix[i][j] = ami_score(P[i], P[j], average_method="arithmetic") ami_matrix += np.triu(ami_matrix).T
np.fill_diagonal(ami_matrix, 1)
numpy.fill_diagonal
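A note on the numpy API in this row's completion: ensemble_AMI only computes the upper triangle of the pairwise AMI matrix (j >= i, where the j == i comparisons score a perfect 1), mirrors it with np.triu(...).T, and then uses np.fill_diagonal to reset the self-comparisons that the mirroring doubled back to 1. A minimal standalone sketch of that symmetrization pattern with made-up scores:

import numpy as np

n = 3
ami_matrix = np.zeros((n, n))
# Pretend pairwise AMI scores were computed for the upper triangle only.
ami_matrix[0, 0] = ami_matrix[1, 1] = ami_matrix[2, 2] = 1.0
ami_matrix[0, 1] = 0.8
ami_matrix[0, 2] = 0.6
ami_matrix[1, 2] = 0.9
ami_matrix += np.triu(ami_matrix).T  # mirror; note this doubles the diagonal
np.fill_diagonal(ami_matrix, 1)      # reset self-agreement in place (returns None)
print(ami_matrix.mean(axis=1))       # per-partition average agreement, as used in consensus_communities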
# This module has been generated automatically from space group information # obtained from the Computational Crystallography Toolbox # """ Space groups This module contains a list of all the 230 space groups that can occur in a crystal. The variable space_groups contains a dictionary that maps space group numbers and space group names to the corresponding space group objects. .. moduleauthor:: <NAME> <<EMAIL>> """ #----------------------------------------------------------------------------- # Copyright (C) 2013 The Mosaic Development Team # # Distributed under the terms of the BSD License. The full license is in # the file LICENSE.txt, distributed as part of this software. #----------------------------------------------------------------------------- import numpy as N class SpaceGroup(object): """ Space group All possible space group objects are created in this module. Other modules should access these objects through the dictionary space_groups rather than create their own space group objects. """ def __init__(self, number, symbol, transformations): """ :param number: the number assigned to the space group by international convention :type number: int :param symbol: the Hermann-Mauguin space-group symbol as used in PDB and mmCIF files :type symbol: str :param transformations: a list of space group transformations, each consisting of a tuple of three integer arrays (rot, tn, td), where rot is the rotation matrix and tn/td are the numerator and denominator of the translation vector. The transformations are defined in fractional coordinates. :type transformations: list """ self.number = number self.symbol = symbol self.transformations = transformations self.transposed_rotations = N.array([N.transpose(t[0]) for t in transformations]) self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2] for t in transformations])) def __repr__(self): return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol)) def __len__(self): """ :return: the number of space group transformations :rtype: int """ return len(self.transformations) def symmetryEquivalentMillerIndices(self, hkl): """ :param hkl: a set of Miller indices :type hkl: Scientific.N.array_type :return: a tuple (miller_indices, phase_factor) of two arrays of length equal to the number of space group transformations. miller_indices contains the Miller indices of each reflection equivalent by symmetry to the reflection hkl (including hkl itself as the first element). phase_factor contains the phase factors that must be applied to the structure factor of reflection hkl to obtain the structure factor of the symmetry equivalent reflection. 
:rtype: tuple """ hkls = N.dot(self.transposed_rotations, hkl) p = N.multiply.reduce(self.phase_factors**hkl, -1) return hkls, p space_groups = {} transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(1, 'P 1', transformations) space_groups[1] = sg space_groups['P 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(2, 'P -1', transformations) space_groups[2] = sg space_groups['P -1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(3, 'P 1 2 1', transformations) space_groups[3] = sg space_groups['P 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(4, 'P 1 21 1', transformations) space_groups[4] = sg space_groups['P 1 21 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(5, 'C 1 2 1', transformations) space_groups[5] = sg space_groups['C 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(6, 'P 1 m 1', transformations) space_groups[6] = sg space_groups['P 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(7, 'P 1 c 1', transformations) space_groups[7] = sg space_groups['P 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations 
= [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(29, 'P c a 21', transformations) space_groups[29] = sg space_groups['P c a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(30, 'P n c 2', transformations) space_groups[30] = sg space_groups['P n c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(31, 'P m n 21', transformations) space_groups[31] = sg space_groups['P m n 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(32, 'P b a 2', transformations) space_groups[32] = sg space_groups['P b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(33, 'P n a 21', transformations) space_groups[33] = sg space_groups['P n a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(34, 'P n n 2', transformations) space_groups[34] = sg space_groups['P n n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(35, 'C m m 2', 
transformations) space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(38, 'A m m 2', transformations) space_groups[38] = sg space_groups['A m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(39, 'A b m 2', transformations) space_groups[39] = sg space_groups['A b m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(40, 'A m a 2', transformations) space_groups[40] = sg space_groups['A m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(41, 'A b a 2', transformations) space_groups[41] = sg space_groups['A b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = 
N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(42, 'F m m 2', transformations) space_groups[42] = sg space_groups['F m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(43, 'F d d 2', transformations) space_groups[43] 
= sg space_groups['F d d 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(44, 'I m m 2', transformations) space_groups[44] = sg space_groups['I m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(45, 'I b a 2', transformations) space_groups[45] = sg space_groups['I b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(46, 'I m a 2', transformations) space_groups[46] = sg space_groups['I m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(47, 'P m m m', transformations) space_groups[47] = sg space_groups['P m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(48, 'P n n n :2', transformations) space_groups[48] = sg space_groups['P n n n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(49, 'P c c m', transformations) space_groups[49] = sg space_groups['P c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(50, 'P b a n :2', transformations) space_groups[50] = sg space_groups['P b a n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(51, 'P m m a', transformations) space_groups[51] = sg space_groups['P m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(52, 'P n n a', transformations) space_groups[52] = sg space_groups['P n n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) 
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
])
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
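# Added note: the translations in this table are not reduced modulo the
# lattice.  A numerator/denominator pair such as [0,0,-1]/[1,1,2] (-1/2
# along c) describes the same symmetry operation as +1/2 along c, since the
# two differ by a full lattice translation; likewise a component equal to 1
# is equivalent to 0.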
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
])
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
])
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,-1], [2,2,2]),
])
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
])
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
])
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
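# Added note: each centrosymmetric group above lists the second half of its
# operations as the first half composed with inversion through the origin,
# (rot, t) -> (-rot, -t); hence the rotation parts and translation
# numerators of operations 5-8 are the negatives of those of operations 1-4.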
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,-1], [2,2,2]),
])
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,-1], [1,2,2]),
])
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
])
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
])
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
])
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
])
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
])
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
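# Added note: the centred settings in this table all follow one pattern: the
# C-, I- and F-centred groups repeat their point-group operations once per
# centring vector.  For 'F m m m' above the centring vectors are (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), which is why the same eight
# rotation parts appear four times with four different translations.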
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
])
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
])
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
])
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
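# Added note: the tetragonal groups start with 'P 4' above.  Its rotation
# part [0,-1,0,1,0,0,0,0,1] is the fourfold rotation about c: reshaped to
# 3x3 it maps fractional coordinates (x, y, z) to (-y, x, z).  Quick
# illustration (assuming N is the Numeric module):
#     N.dot(N.reshape(N.array([0,-1,0,1,0,0,0,0,1]), (3, 3)),
#           N.array([1, 2, 3]))
# returns array([-2, 1, 3]).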
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
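# Added note: 'P 41' and 'P 43' above are an enantiomorphic pair of screw
# axes: both combine the fourfold rotation with a translation along c, of
# 1/4 ([0,0,1]/[1,1,4]) for the 4_1 screw and 3/4 ([0,0,3]/[1,1,4]) for
# the 4_3 screw.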
transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg

transformations = _mk_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(83, 'P 4/m', transformations) space_groups[83] = sg space_groups['P 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(84, 'P 42/m', transformations) space_groups[84] = sg space_groups['P 42/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(85, 'P 4/n :2', transformations) space_groups[85] = sg space_groups['P 4/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(86, 'P 42/n :2', transformations) space_groups[86] = sg space_groups['P 42/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(87, 'I 4/m', transformations) space_groups[87] = sg space_groups['I 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = 
N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(88, 'I 41/a :2', transformations) space_groups[88] = sg space_groups['I 41/a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(89, 'P 4 2 2', transformations) space_groups[89] = sg space_groups['P 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(90, 'P 4 21 2', transformations) space_groups[90] = sg space_groups['P 4 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(93, 'P 42 2 2', transformations) space_groups[93] = sg space_groups['P 42 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(94, 'P 42 21 2', transformations) space_groups[94] = sg space_groups['P 42 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(95, 'P 43 2 2', transformations) space_groups[95] = sg space_groups['P 43 2 2'] = sg 
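# --- Illustrative sketch (not part of the generated table) ----------------
# Each entry stored above is a tuple (rot, trans_num, trans_den) encoding
# the symmetry operation  x -> rot * x + trans_num / trans_den  on
# fractional coordinates x. The helper below is a hypothetical example of
# applying one such tuple; it assumes only the array module N already
# imported by this file and is not used elsewhere in the table.
def _apply_symmetry_op(symop, point):
    # rot is an integer 3x3 matrix; the translation is the elementwise
    # rational vector trans_num / trans_den.
    rot, trans_num, trans_den = symop
    return N.dot(rot, point) + trans_num / (trans_den * 1.0)
# ---------------------------------------------------------------------------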
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(96, 'P 43 21 2', transformations) space_groups[96] = sg space_groups['P 43 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(97, 'I 4 2 2', transformations) space_groups[97] = sg space_groups['I 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(98, 'I 41 2 2', transformations) space_groups[98] = sg space_groups['I 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(99, 'P 4 m m', transformations) space_groups[99] = sg space_groups['P 4 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(100, 'P 4 b m', transformations) space_groups[100] = sg space_groups['P 4 b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = 
N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(101, 'P 42 c m', transformations) space_groups[101] = sg space_groups['P 42 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(102, 'P 42 n m', transformations) space_groups[102] = sg space_groups['P 42 n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(103, 'P 4 c c', transformations) space_groups[103] = sg space_groups['P 4 c c'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(104, 'P 4 n c', transformations) space_groups[104] = sg space_groups['P 4 n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(105, 'P 42 m c', transformations) space_groups[105] = sg space_groups['P 42 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(106, 'P 42 b c', transformations) space_groups[106] = sg space_groups['P 42 b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(107, 'I 4 m m', transformations) space_groups[107] = sg space_groups['I 4 m m'] = sg transformations = 
[] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(108, 'I 4 c m', transformations) space_groups[108] = sg space_groups['I 4 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(109, 'I 41 m d', transformations) space_groups[109] = sg space_groups['I 41 m d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(110, 'I 41 c d', transformations) space_groups[110] = sg space_groups['I 41 c d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(111, 'P -4 2 m', transformations) space_groups[111] = sg space_groups['P -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(112, 'P -4 2 c', transformations) space_groups[112] = sg space_groups['P -4 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(113, 'P -4 21 m', transformations) space_groups[113] = sg space_groups['P -4 21 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(114, 'P -4 21 c', transformations) space_groups[114] = sg space_groups['P -4 21 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(115, 'P -4 m 2', transformations) space_groups[115] = sg space_groups['P -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(116, 'P -4 c 2', transformations) space_groups[116] = sg space_groups['P -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) 
trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg; space_groups['P -4 b 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg; space_groups['P -4 n 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3)
trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg; space_groups['I -4 m 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg; space_groups['I -4 c 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg; space_groups['I -4 2 m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,3]); trans_den = N.array([2,1,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,3]); trans_den = N.array([2,1,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,3]); trans_den = N.array([2,1,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,3]); trans_den = N.array([2,1,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,5]); trans_den = N.array([1,2,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,5]); trans_den = N.array([1,2,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,5]); trans_den = N.array([1,2,4]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,5]); trans_den = N.array([1,2,4]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg; space_groups['I -4 2 d'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg; space_groups['P 4/m m m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3)
trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg; space_groups['P 4/m c c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg; space_groups['P 4/n b m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg; space_groups['P 4/n n c :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg; space_groups['P 4/m b m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3)
trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg; space_groups['P 4/m n c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg; space_groups['P 4/n m m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
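# --- Illustrative sketch (added commentary; not part of the generated table) ---
# Every operation above is stored as a tuple (rot, trans_num, trans_den): rot is
# a 3x3 integer rotation matrix, and the fractional translation is the
# elementwise quotient trans_num/trans_den (e.g. [0,0,1] over [1,1,2] encodes
# the translation (0, 0, 1/2)). A minimal helper, assuming only this tuple
# layout and that N provides array/dot as used throughout this module; the name
# _apply_symmetry_op is hypothetical and not used elsewhere in this file:
def _apply_symmetry_op(op, point):
    rot, trans_num, trans_den = op
    # x' = rot.x + trans_num/trans_den, promoting the integer numerator to
    # float so the translation is exact elementwise division.
    return N.dot(rot, point) + (1.0 * trans_num) / trans_den
# Example: _apply_symmetry_op(transformations[0], N.array([0.1, 0.2, 0.3]))
# applies the identity operation of the group defined just above.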
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg; space_groups['P 42/m m c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3)
trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg; space_groups['P 42/m c m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg; space_groups['P 42/n b c :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg; space_groups['P 42/n n m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg; space_groups['P 42/m b c'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3)
trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg; space_groups['P 42/m n m'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,0]); trans_den = N.array([2,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,0]); trans_den = N.array([1,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,-1]); trans_den = N.array([2,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,-1]); trans_den = N.array([1,1,2]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg; space_groups['P 42/n m c :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,0,1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,1,1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([1,1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3)
trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,0,-1]); trans_den = N.array([2,1,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,-1,-1]); trans_den = N.array([1,2,2]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([-1,-1,0]); trans_den = N.array([2,2,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg; space_groups['P 42/n c m :2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1]); rot.shape = (3, 3); trans_num = N.array([0,0,0]); trans_den = N.array([1,1,1]); transformations.append((rot, trans_num, trans_den))
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
])
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,1], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-3,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,5], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,-1], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,-1,1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,-1], [4,4,4]),
])
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-3,-3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,-1], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,-1,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
])
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
])
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
])
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
])
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
])
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,7], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,5], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,5], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,5], [3,3,6]),
])
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
_add_transformations(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
])
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den
= N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(167, 'R -3 c :H', transformations) space_groups[167] = sg space_groups['R -3 c :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(168, 'P 6', transformations) space_groups[168] = sg space_groups['P 6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(169, 'P 61', transformations) space_groups[169] = sg space_groups['P 61'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(170, 'P 65', transformations) space_groups[170] = sg space_groups['P 65'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(171, 'P 62', transformations) space_groups[171] = sg space_groups['P 62'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(172, 'P 64', transformations) space_groups[172] = sg space_groups['P 64'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(173, 'P 63', transformations) space_groups[173] = sg space_groups['P 63'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(174, 'P -6', transformations) space_groups[174] = sg space_groups['P -6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(175, 'P 6/m', transformations) space_groups[175] = sg space_groups['P 6/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(176, 'P 63/m', transformations) space_groups[176] = sg space_groups['P 63/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(177, 'P 6 2 2', transformations) space_groups[177] = sg space_groups['P 6 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(178, 'P 61 2 2', transformations) space_groups[178] = sg space_groups['P 61 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(179, 'P 65 2 2', transformations) space_groups[179] = sg space_groups['P 65 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(180, 'P 62 2 2', transformations) space_groups[180] = sg space_groups['P 62 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) 
trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(181, 'P 64 2 2', transformations) space_groups[181] = sg space_groups['P 64 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(182, 'P 63 2 2', transformations) space_groups[182] = sg space_groups['P 63 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(183, 'P 6 m m', transformations) space_groups[183] = sg space_groups['P 6 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(184, 'P 6 c c', transformations) space_groups[184] = sg space_groups['P 6 c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(185, 'P 63 c m', transformations) space_groups[185] = sg space_groups['P 63 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(186, 'P 63 m c', transformations) space_groups[186] = sg space_groups['P 63 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(187, 'P -6 m 2', transformations) space_groups[187] = sg space_groups['P -6 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(188, 'P -6 c 2', transformations) space_groups[188] = sg space_groups['P -6 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot =
N.array([1,0,0,0,1,0,0,0,-1])
numpy.array
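The record above builds each `SpaceGroup` from `(rot, trans_num, trans_den)` triples: a flat 3x3 integer rotation matrix plus a fractional translation stored as separate integer numerator and denominator vectors. A minimal sketch of how one such triple acts on a fractional coordinate; the point and the triple are made-up illustration values, not taken from any particular group above:

import numpy as np

rot = np.array([0, -1, 0, 1, -1, 0, 0, 0, 1]).reshape(3, 3)
trans_num = np.array([0, 0, 1])
trans_den = np.array([1, 1, 2])

point = np.array([0.1, 0.2, 0.3])
# rotate, add the fractional translation, and wrap back into the unit cell
image = (rot @ point + trans_num / trans_den) % 1.0
print(image)  # [0.8 0.9 0.8]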
def example(Simulator):
    from csdl import Model
    import csdl
    import numpy as np

    class ExampleSimple(Model):

        def define(self):
            # add_input
            nx = 3
            ny = 4
            mesh = np.zeros((nx, ny, 3))
            mesh[:, :, 0] = np.outer(
np.arange(nx)
numpy.arange
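The prompt stops inside the `np.outer(` call and the completion only confirms the first argument, `np.arange(nx)`. A plausible second argument that gives each mesh row a constant x coordinate is `np.ones(ny)`; it is assumed here purely to make the sketch runnable:

import numpy as np

nx, ny = 3, 4
mesh = np.zeros((nx, ny, 3))
# the outer product of the row indices with a vector of ones broadcasts
# the x index across every column of the mesh
mesh[:, :, 0] = np.outer(np.arange(nx), np.ones(ny))
print(mesh[:, :, 0])
# [[0. 0. 0. 0.]
#  [1. 1. 1. 1.]
#  [2. 2. 2. 2.]]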
# test point in polygon functions

import os
# import math as maths

import numpy as np

import resqpy.olio.point_inclusion as pip

# from numpy.testing import assert_array_almost_equal


def test_pip_cn_and_wn():
    # unit square polygon
    poly = np.array([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
    p_in = np.array([(0.00001, 0.00001), (0.00001, 0.99999), (0.99999, 0.00001), (0.99999, 0.99999)])
    p_out = np.array([(1.1, 0.1), (-0.1, 0.2), (0.5, 1.00001), (0.4, -0.0001),
                      (1.00001, 1.00001), (1.00001, -0.00001)])
    for pip_fn in [pip.pip_cn, pip.pip_wn]:
        assert pip_fn((0.5, 0.5), poly)
        for p in p_in:
            assert pip_fn(p, poly)
        for p in p_out:
            assert not pip_fn(p, poly)
    assert np.all(pip.pip_array_cn(p_in, poly))
    assert not np.any(pip.pip_array_cn(p_out, poly))


def test_figure_of_eight():
    fig_8 = np.array([(-100.0, -200.0), (100.0, 200.0), (-100.0, 200.0), (100.0, -200.0)])
    p_in = np.array([(-99.0, -199.0), (0.0, -1.0), (99.0, 199.0), (0.0, 1.0), (49.9, -100.0)])
    p_out = np.array([(1000.0, -23.0), (1.0, 0.0), (-0.001, 0.0), (-50.1, 100.0)])
    for pip_fn in (pip.pip_cn, pip.pip_wn):
        for p in p_in:
            assert pip_fn(p, fig_8)
        for p in p_out:
            assert not pip_fn(p, fig_8)
    assert np.all(pip.pip_array_cn(p_in, fig_8))
    assert not np.any(pip.pip_array_cn(p_out, fig_8))


def test_points_in_polygon(tmp_path):
    # create an ascii file holding vertices of a polygon
    poly_file = os.path.join(tmp_path, 'diamond.txt')
    diamond = np.array([(0.0, 3.0, 0.0), (3.0, 6.0, -1.3), (6.0, 3.0, 12.5), (3.0, 0.0, 0.0)])
    with open(poly_file, 'w') as fp:
        for xyz in diamond:
            fp.write(f'{xyz[0]} {xyz[1]} {xyz[2]}\n')
        fp.write('999.0 999.0 999.0\n')
    # test some points with no multiplier applied to polygon geometry
    p_in = np.array([(3.0, 3.0), (0.1, 3.0), (1.55, 1.55), (4.49, 4.49), (5.99, 3.0), (3.1, 0.11)])
    p_out =
np.array([(-3.0, -3.0), (2.0, 0.99), (4.51, 4.51), (6.01, 3.0)])
numpy.array
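`pip_cn` and `pip_wn` are exercised above only through their contracts. As an illustration of the kind of test a crossing-number function implements, here is a minimal standalone sketch written for this document, not resqpy's actual code:

import numpy as np

def crossing_number_inside(p, poly):
    # cast a horizontal ray from p to the right and count edge crossings;
    # an odd count means the point is inside
    x, y = p[0], p[1]
    inside = False
    n = len(poly)
    for i in range(n):
        x0, y0 = poly[i][0], poly[i][1]
        x1, y1 = poly[(i + 1) % n][0], poly[(i + 1) % n][1]
        if (y0 <= y) != (y1 <= y):  # edge straddles the ray's y level
            x_cross = x0 + (y - y0) * (x1 - x0) / (y1 - y0)
            if x_cross > x:
                inside = not inside
    return inside

unit_square = np.array([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
assert crossing_number_inside((0.5, 0.5), unit_square)
assert not crossing_number_inside((1.1, 0.1), unit_square)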
#!/usr/bin/env python
import os,sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse

from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios

import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
from PIL import Image

if __name__ == '__main__':
    # parse arguments
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-s', '--scenario', default='simple.py',
                        help='Path of the scenario Python script.')
    args = parser.parse_args()

    # load scenario from script
    scenario = scenarios.load(args.scenario).Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment
    env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation,
                        info_callback=None, shared_viewer=False)
    # render call to create viewer window (necessary only for interactive policies)
    env.render()
    # execution loop
    obs_n = env.reset()

    DISCOUNT = 0.99
    REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
    MIN_REPLAY_MEMORY_SIZE = 1_000  # Minimum number of steps in a memory to start training
    MINIBATCH_SIZE = 64  # How many steps (samples) to use for training
    UPDATE_TARGET_EVERY = 5  # Terminal states (end of episodes)
    MODEL_NAME = '2x256'
    MIN_REWARD = 20  # For model save
    MEMORY_FRACTION = 0.20

    # Environment settings
    EPISODES = 200

    # Exploration settings
    epsilon = 1  # not a constant, going to be decayed
    EPSILON_DECAY = 0.99975
    MIN_EPSILON = 0.001

    # Stats settings
    AGGREGATE_STATS_EVERY = 50  # episodes
    SHOW_PREVIEW = False

    # For stats
    ep_rewards = [[-200], [-200], [-200]]

    # For more repetitive results
    random.seed(1)
    np.random.seed(1)
    tf.set_random_seed(1)

    # Memory fraction, used mostly when training multiple agents
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
    #backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

    # Create models folder
    if not os.path.isdir('models'):
        os.makedirs('models')

    # Own Tensorboard class
    class ModifiedTensorBoard(TensorBoard):

        # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.step = 1
            self.writer = tf.summary.FileWriter(self.log_dir)

        # Overriding this method to stop creating default log writer
        def set_model(self, model):
            pass

        # Overrided, saves logs with our step number
        # (otherwise every .fit() will start writing from 0th step)
        def on_epoch_end(self, epoch, logs=None):
            self.update_stats(**logs)

        # Overrided
        # We train for one batch only, no need to save anything at epoch end
        def on_batch_end(self, batch, logs=None):
            pass

        # Overrided, so won't close writer
        def on_train_end(self, _):
            pass

        # Custom method for saving own metrics
        # Creates writer, writes custom metrics and closes writer
        def update_stats(self, **stats):
            self._write_logs(stats, self.step)

    # Agent class
    class DQNAgent:
        def __init__(self, i):
            self.index = i

            # Main model
            self.model = self.create_model()

            # Target network
            self.target_model = self.create_model()
            self.target_model.set_weights(self.model.get_weights())

            # An array with last n steps for training
            self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)

            # Custom tensorboard object
            self.tensorboard = ModifiedTensorBoard(
                log_dir="logs/{}-{}-{}".format(MODEL_NAME, self.index, int(time.time())))

            # Used to count when to update target network with main network's weights
            self.target_update_counter = 0

        def create_model(self):
            model = Sequential()

            model.add(Conv2D(256, (3, 3), input_shape=(10, 10, 3)))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.2))

            model.add(Conv2D(256, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.2))

            model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
            model.add(Dense(64))

            model.add(Dense(5, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (5)
            model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
            return model

        # Adds step's data to a memory replay array
        # (observation space, action, reward, new observation space, done)
        def update_replay_memory(self, transition):
            self.replay_memory.append(transition)

        # Trains main network every step during episode
        def train(self, terminal_state, step):

            # Start training only if certain number of samples is already saved
            if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
                return

            # Get a minibatch of random samples from memory replay table
            minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

            # Get current states from minibatch, then query NN model for Q values
            current_states = np.array([transition[0] for transition in minibatch]) / 255
            current_qs_list = self.model.predict(current_states)

            # Get future states from minibatch, then query NN model for Q values
            # When using target network, query it, otherwise main network should be queried
            new_current_states = np.array([transition[3] for transition in minibatch]) / 255
            future_qs_list = self.target_model.predict(new_current_states)

            X = []
            y = []

            # Now we need to enumerate our batches
            for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):

                # If not a terminal state, get new q from future states, otherwise set it to 0
                # almost like with Q Learning, but we use just part of equation here
                if not done:
                    max_future_q = np.max(future_qs_list[index])
                    new_q = reward + DISCOUNT * max_future_q
                else:
                    new_q = reward

                # Update Q value for given state
                current_qs = current_qs_list[index]
                current_qs[action] = new_q

                # And append to our training data
                X.append(current_state)
                y.append(current_qs)

            # Fit on all samples as one batch, log only on terminal state
            self.model.fit(
np.array(X)
numpy.array
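The heart of `train()` above is the Q-target construction. The same update in miniature, with made-up Q vectors for a single transition:

import numpy as np

DISCOUNT = 0.99

reward, done, action = 1.0, False, 1
future_qs = np.array([0.2, 0.7, 0.1, 0.0, 0.4])   # target-net output for s'
current_qs = np.array([0.3, 0.3, 0.3, 0.3, 0.3])  # main-net output for s

# terminal states keep the bare reward; otherwise bootstrap from the target net
new_q = reward + DISCOUNT * np.max(future_qs) if not done else reward
current_qs[action] = new_q
print(current_qs)  # [0.3 1.693 0.3 0.3 0.3] -- the row appended to y in train()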
import pickle
import time

from distil.utils.utils import LabeledToUnlabeledDataset
from distil.utils.config_helper import read_config_file
from distil.utils.train_helper import data_train
from distil.utils.models.simple_net import TwoLayerNet
from distil.active_learning_strategies import GLISTER, BADGE, EntropySampling, RandomSampling, LeastConfidenceSampling, \
    MarginSampling, CoreSet, AdversarialBIM, AdversarialDeepFool, KMeansSampling, \
    BALDDropout, FASS
from distil.utils.models.resnet import ResNet18

from torchvision import datasets, transforms
from torch.utils.data import Subset, TensorDataset, ConcatDataset
import torch
import numpy as np
import sys
from sklearn.preprocessing import StandardScaler
import argparse

sys.path.append('./')


class TrainClassifier:

    def __init__(self, config_file):
        self.config_file = config_file
        self.config = read_config_file(config_file)

    def getModel(self, model_config):
        if model_config['architecture'] == 'resnet18':
            if ('target_classes' in model_config) and ('channel' in model_config):
                net = ResNet18(num_classes=model_config['target_classes'],
                               channels=model_config['channel'])
            elif 'target_classes' in model_config:
                net = ResNet18(num_classes=model_config['target_classes'])
            else:
                net = ResNet18()
        elif model_config['architecture'] == 'two_layer_net':
            net = TwoLayerNet(model_config['input_dim'],
                              model_config['target_classes'],
                              model_config['hidden_units_1'])
        return net

    def libsvm_file_load(self, path, dim, save_data=False):
        data = []
        target = []
        with open(path) as fp:
            line = fp.readline()
            while line:
                temp = [i for i in line.strip().split(" ")]
                # Class Number.
                # Not assumed to be in (0, K-1)
                target.append(int(float(temp[0])))
                temp_data = [0] * dim
                for i in temp[1:]:
                    ind, val = i.split(':')
                    temp_data[int(ind) - 1] = float(val)
                data.append(temp_data)
                line = fp.readline()
        X_data = np.array(data, dtype=np.float32)
        Y_label =
np.array(target)
numpy.array
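The sparse "label idx:val idx:val" layout that `libsvm_file_load` parses line by line, shown on two in-memory rows rather than a file:

import numpy as np

lines = ["1 1:0.5 3:2.0", "-1 2:1.5"]
dim = 3
data, target = [], []
for line in lines:
    temp = line.strip().split(" ")
    target.append(int(float(temp[0])))
    row = [0.0] * dim
    for item in temp[1:]:
        ind, val = item.split(':')
        row[int(ind) - 1] = float(val)  # indices are 1-based in the file
    data.append(row)
X = np.array(data, dtype=np.float32)
y = np.array(target)
print(X)  # [[0.5 0.  2. ] [0.  1.5 0. ]]
print(y)  # [ 1 -1]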
import numpy as np
from scipy.io import wavfile
import SignalUtils as su
from os import listdir
import random
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
from config import get_mapping_paths as paths

paths_mapping = paths()  # [("D:/dataset/combine/", 1), ("D:/dataset/other/", 0), ("D:/dataset/voice/", 1)]
files_mapping = []
for path_mapping in paths_mapping:
    files_mapping.extend([(path_mapping[0] + file, path_mapping[1]) for file in listdir(path_mapping[0])])
random.shuffle(files_mapping)

test_index = int(0.6 * len(files_mapping))
train_samples = files_mapping[0:test_index]
test_samples = files_mapping[test_index:len(files_mapping)]

batch_size = 32
look_back = 5
epochs = 200
model_file = "d:/dataset/simple_model.h5"
callback = [EarlyStopping(monitor='val_loss', patience=5, mode='auto')]

model = Sequential()
model.add(Dense(100, input_dim=look_back * 161))
model.add(Dense(60, activation='sigmoid'))
model.add(Dense(60, activation='sigmoid'))
model.add(Dense(120, activation='sigmoid'))
# the original said activation='step', which is not a Keras activation name;
# sigmoid is assumed here, matching the binary_crossentropy loss and the
# 0.5 threshold used below
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.load_weights(model_file)

norm_min = -40
norm_max = 40
predictions = []
for sample in test_samples:
    X = su.spectrogram_from_file(filename=sample[0], max_freq=8000)
    if X is None:
        continue
    X = su.prepare_simple_feedforward_data(X, look_back=look_back)[0].reshape(1, look_back * 161)
    X = np.asarray([(s - norm_min) / (norm_max - norm_min) for s in X]).reshape(1, look_back * 161)
    y = sample[1]
    prediction = model.predict(X, batch_size=1, verbose=2)
    if prediction > 0.5:
        prediction = 1
    else:
        prediction = 0
    predictions.append(prediction == y)

# train_sample = random.choice(test_samples)
# X_test = su.spectrogram_from_file(filename=train_sample[0], max_freq=8000)
# if X is None:
#     continue
# X_test = nn.prepare_feedforward_data(X_test, look_back=look_back)
# y_test = np.ones(X_test.shape[0]) * sample[1]
# print("prediction after batch train ", nn.model.predict(X_test, batch_size=1, verbose=2))
# print('evaluation after batch: ', nn.evaluate(X, y))
# print('evaluation of test after batch: ', nn.evaluate(X_test, y_test))

# NOTE: `nn` and `NN` below are never defined or imported in this script;
# they presumably come from a helper module that is not shown here.
train_sample = random.choice(test_samples)
X_test = su.spectrogram_from_file(filename=train_sample[0], max_freq=8000)
X_test = su.prepare_feedforward_data(X_test, look_back=5)
y_test = np.ones(X_test.shape[0]) * sample[1]
weights = nn.model.get_weights()
predict_nn = NN(tsteps=look_back * 23, batch_size=1, epochs=1)
predict_nn.create_feed_forward()
predict_nn.model.set_weights(weights)
print("prediction ", nn.model.predict(X_test, batch_size=1, verbose=2))

scores = []
for sample in test_samples:
    X = su.spectrogram_from_file(filename=sample[0], max_freq=8000)
    if X is None:
        continue
    X = nn.prepare_feedforward_data(X, look_back=5)
    for i in range(nn.epochs):
        y =
np.ones(X.shape[0])
numpy.ones
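A small sketch of the per-frame label vector the completion produces; the frame count and label are made up for illustration.

import numpy as np

n_frames = 4                      # stands in for X.shape[0] above
label = 1                         # stands in for sample[1]
y = np.ones(n_frames) * label     # one target per look-back window
print(y)                          # -> [1. 1. 1. 1.]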
# coding=utf-8 """Fiducial Registration Educational Demonstration tests""" import math import numpy as np from sksurgeryfredbe.algorithms.errors import expected_absolute_value import sksurgeryfredbe.algorithms.point_based_reg as pbreg def _make_circle_fiducials(no_fids, centre, radius, fixed_stddevs, moving_stddevs): fixed_fids = np.zeros(shape=(no_fids, 3), dtype=np.float64) moving_fids = np.zeros(shape=(no_fids, 3), dtype=np.float64) angle_inc = math.pi * 2.0 / float(no_fids) for fid in range(no_fids): fixed_fids[fid] = ([radius * math.cos(angle_inc*fid), radius * math.sin(angle_inc*fid), 0.0] + np.random.normal(scale=fixed_stddevs) + centre) moving_fids[fid] = ([radius * math.cos(angle_inc*fid), radius * math.sin(angle_inc*fid), 0.0] + np.random.normal(scale=moving_stddevs) + centre) return fixed_fids, moving_fids def test_pbr_3_fids(): """ Tests for tre_from_fle_2d """ fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64) moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64) fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev) moving_fle_easv = expected_absolute_value(moving_fle_std_dev) target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64) pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv) centre = np.array([0.0, 0.0, 0.0], dtype=np.float64) radius = 20.0 tresq_sum = 0 fresq_sum = 0 expected_tre_squared = 0 expected_fre = 0 repeats = 100 np.random.seed(0) for _ in range(repeats): fixed_fids, moving_fids = _make_circle_fiducials(3, centre, radius, fixed_fle_std_dev, moving_fle_std_dev) [_success, fre, _mean_fle, expected_tre_squared, expected_fre, _transformed_target_2d, actual_tre, _no_fids] = pbr.register( fixed_fids, moving_fids) tresq_sum += actual_tre*actual_tre fresq_sum += fre*fre ave_tresq = tresq_sum/repeats ave_fresq = fresq_sum/repeats assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10) assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05) def test_pbr_10_fids(): """ Tests for tre_from_fle_2d """ fixed_fle_std_dev =
np.array([1.0, 1.0, 1.0], dtype=np.float64)
numpy.array
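As a sketch of how this std-dev vector is consumed, np.random.normal broadcasts a length-3 scale to one independent draw per axis; the seed is only for reproducibility here.

import numpy as np

fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
np.random.seed(0)
noise = np.random.normal(scale=fixed_fle_std_dev)  # independent x/y/z perturbations
print(noise.shape)                                 # -> (3,)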
from keras.preprocessing import image from keras.applications.inception_v3 import InceptionV3, preprocess_input from keras.applications.vgg19 import VGG19 from keras.applications.vgg19 import preprocess_input as vgg_preprocess_input from keras.models import Model, load_model from keras.layers import Input import numpy as np from moviepy.video.io.VideoFileClip import VideoFileClip from PIL import Image import getopt import sys sample_fold = './SampleVidImg' class Extractor(): def __init__(self, weights=None, layer='avg_pool'): """Either load pretrained from imagenet, or load our saved weights from our own training.""" self.weights = weights # so we can check elsewhere which model if weights is None: # Get model with pretrained weights. input_tensor = Input(shape=(299, 299, 3)) base_model = InceptionV3( input_shape=(299, 299, 3), weights='imagenet', include_top=True ) # We'll extract features at the final pool layer. self.model = Model( input=base_model.input, output=base_model.get_layer(layer).output ) else: # Load the model first. self.model = load_model(weights) # Then remove the top so we get features not predictions. # From: https://github.com/fchollet/keras/issues/2371 self.model.layers.pop() self.model.layers.pop() # two pops to get to pool layer self.model.outputs = [self.model.layers[-1].output] self.model.output_layers = [self.model.layers[-1]] self.model.layers[-1].outbound_nodes = [] def extract(self, image_path): img = image.load_img(image_path, target_size=(299, 299)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) # Get the prediction. features = self.model.predict(x) if self.weights is None: # For imagenet/default network: features = features[0] else: # For loaded network: features = features[0] return features def extract_PIL(self, img): img = img.resize((299, 299)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) # Get the prediction. features = self.model.predict(x) if self.weights is None: # For imagenet/default network: features = features[0] else: # For loaded network: features = features[0] return features class VGGExtractor(): def __init__(self, weights=None): """Either load pretrained from imagenet, or load our saved weights from our own training.""" self.weights = weights # so we can check elsewhere which model if weights is None: # Get model with pretrained weights. input_tensor = Input(shape=(224, 224, 3)) base_model = VGG19(weights='imagenet', include_top=True) # We'll extract features at the final pool layer. self.model = Model( input=base_model.input, output=base_model.layers[-3].output ) else: # Load the model first. self.model = load_model(weights) # Then remove the top so we get features not predictions. # From: https://github.com/fchollet/keras/issues/2371 self.model.layers.pop() self.model.layers.pop() self.model.layers.pop() # two pops to get to pool layer self.model.outputs = [self.model.layers[-1].output] self.model.output_layers = [self.model.layers[-1]] self.model.layers[-1].outbound_nodes = [] def extract(self, image_path): img = image.load_img(image_path, target_size=(224, 224)) x = image.img_to_array(img) x =
np.expand_dims(x, axis=0)
numpy.expand_dims
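A minimal illustration of the batch axis this completion adds before Keras prediction; the zero image is a stand-in for a real preprocessed input.

import numpy as np

x = np.zeros((299, 299, 3), dtype=np.float32)  # placeholder for one image tensor
batch = np.expand_dims(x, axis=0)              # model.predict expects a batch dimension
print(batch.shape)                             # -> (1, 299, 299, 3)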
from pandas import read_csv
import os
import numpy as np
import csv

basePath = "results/ARIMA/"
datasetBaseName = "ukdale_def"

def save_accuracy_to_csv(values, path, day, seriesName):
    if not os.path.isdir(path):
        try:
            os.mkdir(path)
        except OSError:
            print("Creation of the directory %s failed" % path)
    with open(path + "/" + "accuracy.csv", mode="a+") as csv_file:
        fieldnames = ['mape', 'corr', 'rmse', 'minmax', 'seriesName', 'days']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        # Write the header exactly once, while the file is still empty.
        if os.stat(path + "/" + "accuracy.csv").st_size == 0:
            writer.writeheader()
        writer.writerow({'mape': values.get("mape"), 'corr': values.get("corr"),
                         'rmse': values.get("rmse"), 'minmax': values.get("minmax"),
                         'seriesName': seriesName, 'days': str(int(day))})

# Accuracy metrics
def forecast_accuracy(forecast, actual):
    mape = np.mean(np.abs(forecast - actual)/np.abs(actual))  # MAPE
    corr = np.corrcoef(forecast, actual)[0,1]                 # corr
    rmse =
np.mean((forecast - actual)**2)
numpy.mean
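A worked sketch of the accuracy metrics above with made-up series; note the completion computes the mean squared error, so a true RMSE additionally takes np.sqrt.

import numpy as np

forecast = np.array([102.0, 98.0, 105.0])   # illustrative values
actual   = np.array([100.0,  97.0, 106.0])
mape = np.mean(np.abs(forecast - actual) / np.abs(actual))
corr = np.corrcoef(forecast, actual)[0, 1]
mse  = np.mean((forecast - actual) ** 2)    # as in the completion
print(mape, corr, np.sqrt(mse))             # sqrt of the mean gives the RMSE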
# -*- coding: utf-8 -*- import os import math import numpy as np np.set_printoptions(suppress=True) import scipy import scipy.stats from scipy.fftpack import fft, ifft from scipy import optimize as opti from scipy.signal import convolve import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from mpl_axes_aligner import align import numba from JPwaptool import JPwaptool import h5py from scipy.interpolate import interp1d plt.rcParams['savefig.dpi'] = 300 plt.rcParams['figure.dpi'] = 300 plt.rcParams['font.size'] = 8 plt.rcParams['lines.markersize'] = 4.0 plt.rcParams['lines.linewidth'] = 1.0 plt.rcParams['mathtext.fontset'] = 'cm' def xiaopeip(wave, spe_pre, eta=0): l = len(wave) flag = 1 lowp = np.argwhere(wave > spe_pre['thres']).flatten() # lowp = rm_frag(lowp) if len(lowp) != 0: fitp = np.arange(lowp.min() - spe_pre['mar_l'], lowp.max() + spe_pre['mar_r']) fitp = np.unique(np.clip(fitp, 0, len(wave)-1)) pet = lowp - spe_pre['peak_c'] pet = np.unique(np.clip(pet, 0, len(wave)-1)) if len(pet) != 0: # pwe, ped = xiaopeip_core(wave, spe_pre['spe'], fitp, pet, eta=eta) pwe = xiaopeip_core(wave, spe_pre['spe'], fitp, pet, eta=eta) else: flag = 0 else: flag = 0 if flag == 0: pet = np.array([np.argmax(wave[spe_pre['peak_c']:])]) pwe = np.array([1]) # return pet, pwe, ped return pet, pwe # def xiaopeip_core(wave, spe, fitp, possible, eta=0): # l = len(wave) # spe = np.concatenate([spe, np.zeros(l - spe.shape[0])]) # ans0 = np.zeros(len(possible)+1).astype(np.float64) # ans0[-1] = wave.min() # b = np.zeros((len(possible)+1, 2)).astype(np.float64) # b[-1, 0] = -np.inf # b[:, 1] = np.inf # mne = spe[np.mod(fitp.reshape(fitp.shape[0], 1) - possible.reshape(1, possible.shape[0]), l)] # ans = opti.fmin_l_bfgs_b(norm_fit, ans0, args=(mne, wave[fitp], eta), approx_grad=True, bounds=b, maxfun=500000) # # ans = opti.fmin_slsqp(norm_fit, ans0, args=(mne, wave[fitp]), bounds=b, iprint=-1, iter=500000) # # ans = opti.fmin_tnc(norm_fit, ans0, args=(mne, wave[fitp]), approx_grad=True, bounds=b, messages=0, maxfun=500000) # pf = ans[0] # return pf[:-1], pf[-1] # def norm_fit(x, M, y, eta=0): # return np.power(y - x[-1] - np.matmul(M, x[:-1]), 2).sum() + eta * x.sum() def xiaopeip_core(wave, spe, fitp, possible, eta=0): l = len(wave) spe = np.concatenate([spe, np.zeros(l - spe.shape[0])]) ans0 = np.zeros(len(possible)).astype(np.float64) b = np.zeros((len(possible), 2)).astype(np.float64) b[:, 1] = np.inf mne = spe[np.mod(fitp.reshape(fitp.shape[0], 1) - possible.reshape(1, possible.shape[0]), l)] try: ans = opti.fmin_l_bfgs_b(norm_fit, ans0, args=(mne, wave[fitp], eta), approx_grad=True, bounds=b, maxfun=500000) except ValueError: ans = [np.ones(len(possible)) * 0.2] # ans = opti.fmin_slsqp(norm_fit, ans0, args=(mne, wave[fitp]), bounds=b, iprint=-1, iter=500000) # ans = opti.fmin_tnc(norm_fit, ans0, args=(mne, wave[fitp]), approx_grad=True, bounds=b, messages=0, maxfun=500000) return ans[0] def norm_fit(x, M, y, eta=0): return np.power(y - np.matmul(M, x), 2).sum() + eta * x.sum() def rm_frag(lowp): t = np.argwhere(np.diff(lowp) > 1).flatten() ind = np.vstack((np.insert(t + 1, 0, 0), np.append(t, len(lowp)))).T slices = [lowp[ind[i][0] : ind[i][1]] for i in range(len(ind))] t = [slices[i] for i in range(len(slices)) if len(slices[i]) > 1] if len(t) == 0: lowp = np.array([]) else: lowp = np.concatenate((t), axis=0) return lowp def lucyddm(waveform, spe_pre, iterations=100): '''Lucy deconvolution Parameters ---------- waveform : 1d array spe : 1d array point spread function; single photon 
electron response iterations : int Returns ------- signal : 1d array References ---------- .. [1] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution .. [2] https://github.com/scikit-image/scikit-image/blob/master/skimage/restoration/deconvolution.py#L329 ''' moveDelta = 9 spe = np.append(np.zeros(len(spe_pre['spe']) - 2 * moveDelta - 1), np.abs(spe_pre['spe'])) waveform = np.where(waveform < 0, 0.0001, waveform) waveform = waveform.astype(np.float) spe = spe.astype(np.float) waveform = waveform / np.sum(spe) wave_deconv = np.array(waveform) spe_mirror = spe[::-1] for _ in range(iterations): relative_blur = waveform / convolve(wave_deconv, spe, mode='same') wave_deconv *= convolve(relative_blur, spe_mirror, mode='same') # there is no need to set the bound if the spe and the wave are all none negative return np.arange(0, len(waveform)-moveDelta), wave_deconv[moveDelta:] def waveformfft(wave, spe_pre): length = len(wave) spefft = fft(spe_pre['spe'], 2*length) wavef = fft(wave, 2*length) wavef[(length-int(length*0.7)):(length+int(length*0.7))] = 0 signalf = np.true_divide(wavef, spefft) recon = np.real(ifft(signalf, 2*length)) return np.arange(length), recon[:length] def threshold(wave, spe_pre): pet = np.argwhere(wave[spe_pre['peak_c']:] > spe_pre['thres'] * 2).flatten() pwe = wave[spe_pre['peak_c']:][pet] pwe = pwe / pwe.sum() * np.abs(wave.sum()) / spe_pre['spe'].sum() if len(pet) == 0: pet = np.array([np.argmax(wave[spe_pre['peak_c']:])]) pwe = np.array([1]) return pet, pwe def read_model(spe_path): with h5py.File(spe_path, 'r', libver='latest', swmr=True) as speFile: cid = speFile['SinglePE'].attrs['ChannelID'] epulse = speFile['SinglePE'].attrs['Epulse'] spe = speFile['SinglePE'].attrs['SpePositive'] thres = speFile['SinglePE'].attrs['Thres'] spe_pre = {} fig = plt.figure() fig.tight_layout() ax = fig.add_subplot(111) for i in range(len(spe)): peak_c = np.argmax(spe[i]); t = np.argwhere(spe[i][peak_c:] < 0.1).flatten()[0] + peak_c mar_l = np.sum(spe[i][:peak_c] < thres[i]) mar_r = np.sum(spe[i][peak_c:t] < thres[i]) spe_pre_i = {'spe':spe[i], 'epulse':epulse, 'peak_c':peak_c, 'mar_l':mar_l, 'mar_r':mar_r, 'thres':thres[i]} spe_pre.update({cid[i]:spe_pre_i}) ax.plot(spe_pre[cid[i]]['spe']) ax.grid() ax.set_xlabel('$Time/\mathrm{ns}$') ax.set_ylabel('$Voltage/\mathrm{mV}$') fig.savefig('img/spe.png', bbox_inches='tight') plt.close() return spe_pre def clip(pet, pwe, thres): if len(pet[pwe > thres]) == 0: pet = np.array([pet[np.argmax(pwe)]]) pwe = np.array([1]) else: pet = pet[pwe > thres] pwe = pwe[pwe > thres] return pet, pwe def snip_baseline(waveform, itera=20): wm = np.min(waveform) waveform = waveform - wm v = np.log(np.log(np.sqrt(waveform+1)+1)+1) N = waveform.shape[0] for i in range(itera): v[i:N-i] = np.minimum(v[i:N-i], (v[:N-2*i] + v[2*i:])/2) w = np.power(np.exp(np.exp(v) - 1) - 1, 2) - 1 + wm return w def demo(pet, pwe, tth, spe_pre, leng, wave, cid, mode, full=False): penum = len(tth) print('PEnum is {}'.format(penum)) pf0 = np.zeros(leng); pf1 = np.zeros(leng) if mode == 'Weight': tru_pet = tth['RiseTime'] t, c = np.unique(tru_pet, return_counts=True) pf0[t] = c pf1[pet] = pwe xlabel = '$PEnum/\mathrm{1}$' distd = '(W/ns,P/1)'; distl = 'pdist' Q = penum; q = np.sum(pwe) edist = np.abs(Q - q) * scipy.stats.poisson.pmf(Q, Q) elif mode == 'Charge': t = tth['RiseTime']; w = tth[mode] t = np.unique(t) c = np.array([
np.sum(w[tth['RiseTime'] == i])
numpy.sum
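A small sketch of the charge-per-time-bin reduction this completion performs inside the list comprehension; the times and charges are invented.

import numpy as np

rise_time = np.array([3, 3, 7, 7, 7])            # per-photoelectron times (illustrative)
charge    = np.array([0.5, 0.4, 1.0, 0.9, 1.1])  # per-photoelectron charges
t = np.unique(rise_time)
c = np.array([np.sum(charge[rise_time == i]) for i in t])
print(t, c)                                      # -> [3 7] [0.9 3. ]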
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from sklearn import datasets import flash from flash.core.classification import Labels from flash.template import TemplateData, TemplateSKLearnClassifier # 1. Download the data data_bunch = datasets.load_iris() # 2. Load the data datamodule = TemplateData.from_sklearn( train_bunch=data_bunch, val_split=0.8, ) # 3. Build the model model = TemplateSKLearnClassifier( num_features=datamodule.num_features, num_classes=datamodule.num_classes, serializer=Labels(), ) # 4. Create the trainer. trainer = flash.Trainer(max_epochs=1, limit_train_batches=1, limit_val_batches=1) # 5. Train the model trainer.fit(model, datamodule=datamodule) # 6. Save it! trainer.save_checkpoint("template_model.pt") # 7. Classify a few examples predictions = model.predict([ np.array([4.9, 3.0, 1.4, 0.2]),
np.array([6.9, 3.2, 5.7, 2.3])
numpy.array
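The rows passed to model.predict above are plain feature vectors; a sketch of what each holds for the iris data (sepal length/width and petal length/width, in cm):

import numpy as np

example_a = np.array([4.9, 3.0, 1.4, 0.2])  # measurements typical of setosa
example_b = np.array([6.9, 3.2, 5.7, 2.3])  # measurements typical of virginica
batch = np.stack([example_a, example_b])    # shape (2, 4), one row per example
print(batch.shape)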
from numba import njit import numpy as np from .utils import ( _choice, _logsumexp, _check_array_sums_to_1, _check_random_state, _argmax, check_sequences, ) __all__ = ["HMM"] class HMM: """Discrete Hidden Markov Model. The number of hidden and observable states are determined by the shapes of the probability matrices passed as parameters. Parameters ---------- init_probas : array-like of shape (n_hidden_states,) The initial probabilities. transitions : array-like of shape (n_hidden_states, n_hidden_states) The transition probabilities. ``transitions[i, j] = P(st+1 = j / st = i)``. emissions : array-like of shape (n_hidden_states, n_observable_states) The probabilities of symbol emission. ``emissions[i, o] = P(Ot = o / st = i)``. n_iter : int, default=10 Number of iterations to run for the EM algorithm (in ``fit()``). """ def __init__(self, init_probas, transitions, emissions, n_iter=10): self.init_probas = np.array(init_probas, dtype=np.float64) self.transitions = np.array(transitions, dtype=np.float64) self.emissions = np.array(emissions, dtype=np.float64) self.n_iter = n_iter self.n_hidden_states = self.A.shape[0] self.n_observable_states = self.B.shape[1] if not ( self.A.shape[0] == self.A.shape[1] == self.pi.shape[0] == self.B.shape[0] ): raise ValueError("inconsistent number of hidden states.") self._check_matrices_conditioning() def log_likelihood(self, sequences): """Compute log-likelihood of sequences. Parameters ---------- sequences : array-like of shape (n_seq, n_obs) or list (or numba typed list) \ of iterables of variable length The sequences of observable states Returns ------- log_likelihood : array of shape (n_seq,) """ total_log_likelihood = 0 sequences, n_obs_max = check_sequences(sequences, return_longest_length=True) log_alpha = np.empty(shape=(self.n_hidden_states, n_obs_max), dtype=np.float32) for seq in sequences: total_log_likelihood += self._forward(seq, log_alpha) return total_log_likelihood def decode(self, sequences, return_log_probas=False): """Decode sequences with Viterbi algorithm. Given a sequence of observable states, return the sequence of hidden states that most-likely generated the input. Parameters ---------- sequences : array-like of shape (n_seq, n_obs) or list (or numba typed list) \ of iterables of variable length The sequences of observable states return_log_probas : bool, default=False If True, log-probabilities of the joint sequences of observable and hidden states are returned Returns ------- best_paths : ndarray of shape (n_seq, n_obs) or list of ndarray of \ variable length The most likely sequences of hidden states. log_probabilities : ndarray of shape (n_seq,) log-probabilities of the joint sequences of observable and hidden states. Only present if ``return_log_probas`` is True. 
""" sequences, n_obs_max = check_sequences(sequences, return_longest_length=True) hidden_states_sequences = [] log_probas = [] log_V = np.empty(shape=(self.n_hidden_states, n_obs_max), dtype=np.float32) back_path = np.empty(shape=(self.n_hidden_states, n_obs_max), dtype=np.int32) for seq in sequences: n_obs = seq.shape[0] self._viterbi(seq, log_V, back_path) best_path = np.empty(n_obs, dtype=np.int32) log_proba = _get_best_path(log_V, back_path, best_path) hidden_states_sequences.append(best_path) if return_log_probas: log_probas.append(log_proba) if isinstance(sequences, np.ndarray): # All sequences have the same length hidden_states_sequences = np.array(hidden_states_sequences) if return_log_probas: return hidden_states_sequences, np.array(log_probas) else: return hidden_states_sequences def sample(self, n_seq=10, n_obs=10, random_state=None): """Sample sequences of hidden and observable states. Parameters ---------- n_seq : int, default=10 Number of sequences to sample n_obs : int, default=10 Number of observations per sequence random_state: int or np.random.RandomState instance, default=None Controls the RNG, see `scikt-learn glossary <https://scikit-learn.org/stable/glossary.html#term-random-state>`_ for details. Returns ------- hidden_states_sequences : ndarray of shape (n_seq, n_obs) observable_states_sequences : ndarray of shape (n_seq, n_obs) """ # TODO: allow n_obs_max rng = _check_random_state(random_state) sequences = np.array( [ _sample_one(n_obs, self.pi, self.A, self.B, seed=rng.tomaxint()) for _ in range(n_seq) ] ) # Unzip array of (hidden_states, observation) into tuple of arrays sequences = sequences.swapaxes(0, 1) return sequences[0], sequences[1] def fit(self, sequences): """Fit model to sequences. The probabilities matrices ``init_probas``, ``transitions`` and ``emissions`` are estimated with the EM algorithm. Parameters ---------- sequences : array-like of shape (n_seq, n_obs) or list (or numba typed list) \ of iterables of variable length The sequences of observable states Returns ------- self : HMM instance """ sequences, n_obs_max = check_sequences(sequences, return_longest_length=True) log_alpha = np.empty(shape=(self.n_hidden_states, n_obs_max)) log_beta = np.empty(shape=(self.n_hidden_states, n_obs_max)) # E[i, j, t] = P(st = i, st+1 = j / O, lambda) log_E = np.empty( shape=(self.n_hidden_states, self.n_hidden_states, n_obs_max - 1) ) # g[i, t] = P(st = i / O, lambda) log_gamma = np.empty(shape=(self.n_hidden_states, n_obs_max)) for _ in range(self.n_iter): self.pi, self.A, self.B = _do_EM_step( sequences, self._log_pi, self._log_A, self._log_B, log_alpha, log_beta, log_E, log_gamma, ) self._check_matrices_conditioning() return self def _viterbi(self, seq, log_V, back_path): # dummy wrapper for conveniency _viterbi(seq, self._log_pi, self._log_A, self._log_B, log_V, back_path) def _forward(self, seq, log_alpha): # dummy wrapper for conveniency return _forward(seq, self._log_pi, self._log_A, self._log_B, log_alpha) def _backward(self, seq, log_beta): # dummy wrapper for conveniency return _backward(seq, self._log_pi, self._log_A, self._log_B, log_beta) def _check_matrices_conditioning(self): _check_array_sums_to_1(self.pi, "init_probas") for s in range(self.n_hidden_states): _check_array_sums_to_1(self.A[s], f"Row {s} of A") _check_array_sums_to_1(self.B[s], f"Row {s} of B") # pi, A and B are respectively init_probas, transitions and emissions # matrices. _log_pi, _log_A and _log_B are updated each time pi, A, or B # are updated, respectively. 
Consider these private (and bug-prone :)):
    # updating the `transitions` attribute directly would not update _log_A.

    @property
    def pi(self):
        return self.init_probas

    @pi.setter
    def pi(self, value):
        self.init_probas = value
        self._recompute_log_pi = True

    @property
    def _log_pi(self):
        if getattr(self, "_recompute_log_pi", True):
            self.__log_pi = np.log(self.pi)
            self._recompute_log_pi = False
        return self.__log_pi

    @property
    def A(self):
        return self.transitions

    @A.setter
    def A(self, value):
        self.transitions = value
        self._recompute_log_A = True

    @property
    def _log_A(self):
        if getattr(self, "_recompute_log_A", True):
            self.__log_A = np.log(self.A)
            self._recompute_log_A = False
        return self.__log_A

    @property
    def B(self):
        return self.emissions

    @B.setter
    def B(self, value):
        self.emissions = value
        self._recompute_log_B = True

    @property
    def _log_B(self):
        if getattr(self, "_recompute_log_B", True):
            self.__log_B = np.log(self.B)
            self._recompute_log_B = False
        return self.__log_B


@njit(cache=True)
def _sample_one(n_obs, pi, A, B, seed):
    """Return (observations, hidden_states) sample"""
    np.random.seed(seed)  # local to this numba function, not global numpy
    observations = []
    hidden_states = []
    s = _choice(pi)
    for _ in range(n_obs):
        hidden_states.append(s)
        obs = _choice(B[s])
        observations.append(obs)
        s = _choice(A[s])
    return hidden_states, observations


@njit(cache=True)
def _forward(seq, log_pi, log_A, log_B, log_alpha):
    """Fill log_alpha array with log probabilities, return log-likelihood"""
    # alpha[i, t] = P(O1, ... Ot, st = i / lambda)
    # recursion is alpha[i, t] = B[i, Ot] * sum_j(alpha[j, t - 1] * A[i, j])
    # which becomes (when applying log)
    # log_alpha[i, t] = log(B[i, Ot]) +
    #                   logsumexp_j(log_alpha[j, t - 1] + _log_A[i, j])
    # since log(sum(ai . bi)) = log(sum(exp(log_ai + log_bi)))
    n_obs = len(seq)
    n_hidden_states = log_pi.shape[0]
    log_alpha[:, 0] = log_pi + log_B[:, seq[0]]
    buffer =
np.empty(shape=n_hidden_states)
numpy.empty
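A short sketch of the scratch-buffer pattern the completion sets up: np.empty allocates without initializing, so the buffer must be fully written before it is read. The probabilities below are invented.

import numpy as np

n_hidden_states = 3
buffer = np.empty(shape=n_hidden_states)            # uninitialized scratch space
log_alpha_prev = np.log(np.array([0.2, 0.5, 0.3]))
log_A_col = np.log(np.array([0.1, 0.6, 0.3]))
buffer[:] = log_alpha_prev + log_A_col              # overwrite everything before use
print(np.log(np.sum(np.exp(buffer))))               # one logsumexp over the buffer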
import numpy as np import torch from torch.autograd import Variable from stats.tensor import tensor def fit(pdfs, parameters, observations, iter, lr): """Estimates the parameters of a mixture model via maximum likelihood maximization. Uses gradient descent for optimization. Parameters ---------- pdfs : List of callable pdfs Callable probability density functions (likelihood function) expecting an array of observations as the only argument. parameters : List of list List of list of parameters that are subject to optimization. e.g. for a bimodal gaussian mixture: [[mu_1, sigma_1], [mu_2, sigma_2]] observations : ndarray Observations from an unknown pdf which parameters are subject to be estimated iter : float Maximum number of iterations lr : float Gradient descent learning rate Returns ------- """ # number of models/classes in mixture K = len(parameters) # initialize mixing coefficients with random values mixcoeffs = np.random.rand(K) mixcoeffs /= np.sum(mixcoeffs) # make the coefficients visible to the update step for k in range(K): mixcoeff = Variable(tensor(mixcoeffs[k]), requires_grad=True) parameters[k].append(mixcoeff) for i in range(iter): likelihood = 0 for k in range(K): # multiply the likelihood with the mixing coefficients # mixing coefficient: p(z_k = 1) p_z = parameters[k][-1].expand(observations.size()) likelihood += pdfs[k](observations) * p_z expectation = torch.mean(torch.log(likelihood)) # add constraint sum(mixcoeffs) = 1 via lagrange multiplier for k in range(K): expectation -= 1.0 * parameters[k][-1] expectation += 1.0 # c = 1 if np.isnan(expectation.data[0]): raise RuntimeError('Singular state. Try different initial parameters') # Determine gradients expectation.backward() # Update parameters with gradient descent for k in range(K): for param in parameters[k]: param.data.add_(lr * param.grad.data) param.grad.data.zero_() return expectation.data[0] if __name__ == '__main__': from stats.distributions import Normal """ Estimate mean and std of a gaussian mixture model via MixtureModel-MLE on Kx10000 observations """
np.random.seed(0)
numpy.random.seed
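The completion pins the global NumPy RNG so the random mixing coefficients below it are reproducible; a minimal demonstration:

import numpy as np

np.random.seed(0)
a = np.random.rand(2)
np.random.seed(0)
b = np.random.rand(2)
print(np.array_equal(a, b))   # -> True: same seed, same draws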
import random #from skimage import io import numpy as np from glob import glob import SimpleITK as sitk from keras.utils import np_utils class Pipeline(object): def __init__(self, list_train ,Normalize=True): self.scans_train = list_train self.train_im=self.read_scans(Normalize) def read_scans(self,Normalize): train_im=[] for i in range(len( self.scans_train)): #if i%10==0: # print('iteration [{}]'.format(i)) #print(len(self.scans_train[i])) flair = glob( self.scans_train[i] + '/*_flair.nii.gz') t2 = glob( self.scans_train[i] + '/*_t2.nii.gz') gt = glob( self.scans_train[i] + '/*_seg.nii.gz') t1 = glob( self.scans_train[i] + '/*_t1.nii.gz') t1c = glob( self.scans_train[i] + '/*_t1ce.nii.gz') t1s=[scan for scan in t1 if scan not in t1c] #print(len(flair)+len(t2)+len(gt)+len(t1s)+len(t1c)) if (len(flair)+len(t2)+len(gt)+len(t1s)+len(t1c))<5: print("there is a problem here!!! the problem lies in this patient :", self.scans_train[i]) continue scans = [flair[0], t1s[0], t1c[0], t2[0], gt[0]] #read a volume composed of 4 modalities tmp = [sitk.GetArrayFromImage(sitk.ReadImage(scans[k])) for k in range(len(scans))] #crop each volume to have a size of (146,192,152) to discard some unwanted background and thus save some computational power ;) z0=1 y0=29 x0=42 z1=147 y1=221 x1=194 tmp=np.array(tmp) tmp=tmp[:,z0:z1,y0:y1,x0:x1] #normalize each slice if Normalize==True: tmp=self.norm_slices(tmp) train_im.append(tmp) del tmp #print(np.array(train_im).shape) return np.array(train_im) def sample_patches_randomly(self, num_patches, d , h , w ): ''' INPUT: num_patches : the total number of samled patches d : this correspnds to the number of channels which is ,in our case, 4 MRI modalities h : height of the patch w : width of the patch OUTPUT: patches : np array containing the randomly sampled patches labels : np array containing the corresping target patches ''' patches, labels = [], [] count = 0 #swap axes to make axis 0 represents the modality and axis 1 represents the slice. 
take the ground truth gt_im = np.swapaxes(self.train_im, 0, 1)[4] #print(gt_im.shape) #take flair image as mask msk = np.swapaxes(self.train_im, 0, 1)[0] #save the shape of the grounf truth to use it afterwards tmp_shp = gt_im.shape #reshape the mask and the ground truth to 1D array gt_im = gt_im.reshape(-1).astype(np.uint8) msk = msk.reshape(-1).astype(np.float32) # maintain list of 1D indices while discarding 0 intensities indices = np.squeeze(np.argwhere((msk!=-9.0) & (msk!=0.0))) del msk # shuffle the list of indices of the class np.random.shuffle(indices) #reshape gt_im gt_im = gt_im.reshape(tmp_shp) #a loop to sample the patches from the images i = 0 pix = len(indices) while (count<num_patches) and (pix>i): #randomly choose an index ind = indices[i] i+= 1 #reshape ind to 3D index ind = np.unravel_index(ind, tmp_shp) # get the patient and the slice id patient_id = ind[0] slice_idx=ind[1] p = ind[2:] #construct the patch by defining the coordinates p_y = (p[0] - (h)/2, p[0] + (h)/2) p_x = (p[1] - (w)/2, p[1] + (w)/2) p_x=list(map(int,p_x)) p_y=list(map(int,p_y)) #take patches from all modalities and group them together tmp = self.train_im[patient_id][0:4, slice_idx,p_y[0]:p_y[1], p_x[0]:p_x[1]] #take the coresponding label patch lbl=gt_im[patient_id,slice_idx,p_y[0]:p_y[1], p_x[0]:p_x[1]] #keep only paches that have the desired size if tmp.shape != (d, h, w) : continue patches.append(tmp) labels.append(lbl) count+=1 patches = np.array(patches) labels=np.array(labels) return patches, labels def norm_slices(self,slice_not): ''' normalizes each slice , excluding gt subtracts mean and div by std dev for each slice clips top and bottom one percent of pixel intensities ''' normed_slices = np.zeros((5, 146, 192, 152)).astype(np.float32) for slice_ix in range(4): normed_slices[slice_ix] = slice_not[slice_ix] for mode_ix in range(146): normed_slices[slice_ix][mode_ix] = self._normalize(slice_not[slice_ix][mode_ix]) normed_slices[-1]=slice_not[-1] return normed_slices def _normalize(self,slice): ''' input: unnormalized slice OUTPUT: normalized clipped slice ''' b = np.percentile(slice, 99) t = np.percentile(slice, 1) slice = np.clip(slice, t, b) image_nonzero = slice[np.nonzero(slice)] if np.std(slice)==0 or np.std(image_nonzero) == 0: return slice else: tmp= (slice - np.mean(image_nonzero)) / np.std(image_nonzero) #since the range of intensities is between 0 and 5000 ,the min in the normalized slice corresponds to 0 intensity in unnormalized slice #the min is replaced with -9 just to keep track of 0 intensities so that we can discard those intensities afterwards when sampling random patches tmp[tmp==tmp.min()]=-9 return tmp ''' def save_image_png (img,output_file="img.png"): """ save 2d image to disk in a png format """ img=np.array(img).astype(np.float32) if np.max(img) != 0: img /= np.max(img) # set values < 1 if np.min(img) <= -1: # set values > -1 img /= abs(np.min(img)) io.imsave(output_file, img) ''' ''' def concatenate (): concatenate two parts into one dataset this can be avoided if there is enough RAM as we can directly from the whole dataset Y_labels_2=np.load("y_dataset_second_part.npy").astype(np.uint8) X_patches_2=np.load("x_dataset_second_part.npy").astype(np.float32) Y_labels_1=np.load("y_dataset_first_part.npy").astype(np.uint8) X_patches_1=np.load("x_dataset_first_part.npy").astype(np.float32) #concatenate both parts X_patches=np.concatenate((X_patches_1, X_patches_2), axis=0) Y_labels=np.concatenate((Y_labels_1, Y_labels_2), axis=0) del 
Y_labels_2,X_patches_2,Y_labels_1,X_patches_1

#shuffle the whole dataset
shuffle = list(zip(X_patches, Y_labels))
np.random.seed(138)
np.random.shuffle(shuffle)
X_patches = np.array([shuffle[i][0] for i in range(len(shuffle))])
Y_labels = np.array([shuffle[i][1] for i in range(len(shuffle))])
del shuffle

np.save( "x_training.npy",X_patches.astype(np.float32) )
np.save( "y_training.npy",Y_labels.astype(np.uint8))
#np.save( "x_valid",X_patches_valid.astype(np.float32) )
#np.save( "y_valid",Y_labels_valid.astype(np.uint8))
'''

def whole_return(val=False):
    while True:
        try:
            if val:
                path_all = glob('/home/parth/Interpretable_ML/BraTS_2018/val/**')
            else:
                path_all = glob('/home/parth/Interpretable_ML/BraTS_2018/train/**')
            # randint is inclusive on both ends, so cap at len(path_all) - 1
            index = random.randint(0, len(path_all) - 1)
            #print(path_all[index])
np.random.shuffle(path_all)
numpy.random.shuffle
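np.random.shuffle, as used in the completion, permutes a mutable sequence in place and returns None; a tiny sketch with invented paths:

import numpy as np

path_all = ["patient_0", "patient_1", "patient_2", "patient_3"]  # hypothetical
np.random.seed(138)
np.random.shuffle(path_all)   # in-place; do not assign the (None) return value
print(path_all)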
"""Test suite for lusmu.vector Copyright 2013 Eniram Ltd. See the LICENSE file at the top-level directory of this distribution and at https://github.com/akaihola/lusmu/blob/master/LICENSE """ import tempfile from unittest import TestCase from mock import patch import joblib from nose.tools import assert_raises, eq_ import numpy as np import pandas as pd from lusmu.core import DIRTY from lusmu.tests.test_core import (NoOutputTypeAction, NoneOutputTypeAction, IntOutputTypeAction) from lusmu.vector import Input from lusmu import vector from lusmu.tests.tools import parameterize def sum(*args): return sum(args) class VectorEq(vector.VectorEquality): """Mock node class implementing the vector equality test""" def __init__(self, value): self._value = value def test_scalar_equality(): """Test cases for lusmu.vector.VectorEq._value_eq() with Python scalars""" @parameterize def check(value, other_value, expected): """Scalar node value {0} == {1}: {2}""" # pylint: disable=W0212 # Access to a protected member of a client class vector = VectorEq(value) assert expected == vector._value_eq(other_value) yield check(DIRTY, DIRTY, True) yield check(DIRTY, 0, False) yield check(0, 0, True) yield check(0, 1, False) yield check(0, 0.0, False) yield check(0, 1.0, False) yield check(0.0, 0.0, True) yield check(0.0, 1.0, False) yield check('a', 'a', True) yield check('a', 'b', False) def test_numpy_vector_equality(): """Test cases for lusmu.vector.VectorEq._value_eq() with numpy arrays""" @parameterize def check(value, other_value, expected): """Vector node value {0} == {1}: {2}""" # pylint: disable=W0212 # Access to a protected member of a client class vector = VectorEq(
np.array(value)
numpy.array
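The completion wraps the test value in an ndarray, and the reason a custom _value_eq exists is that array comparison is element-wise rather than one boolean; a short illustration:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([1, 2, 3])
print(a == b)           # -> [ True  True  True], element-wise
print((a == b).all())   # -> True, the scalar verdict a node comparison needs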
import numpy as np from meshparty import utils from scipy import spatial, sparse from dataclasses import dataclass, fields, asdict, make_dataclass try: from pykdtree.kdtree import KDTree as pyKDTree except: pyKDTree = spatial.cKDTree from meshparty import skeleton_io from collections.abc import Iterable from .skeleton_utils import resample_path def _metadata_from_dict( meta_dict, dataclass_name="MetaMetadata", ): meta = make_dataclass(dataclass_name, fields=meta_dict.keys()) return meta(**meta_dict) @dataclass class SkeletonMetadata: root_id: int = None soma_pt_x: float = None soma_pt_y: float = None soma_pt_z: float = None soma_radius: float = None collapse_soma: bool = None collapse_function: str = None invalidation_d: float = None smooth_vertices: bool = None compute_radius: bool = None shape_function: str = None smooth_iterations: int = None smooth_neighborhood: int = None smooth_r: float = None cc_vertex_thresh: int = None remove_zero_length_edges: bool = None collapse_params: dict = None timestamp: float = None skeleton_type: str = None meta: object = None # Fields used for skeletonization _skeletonize_fields = [ "soma_pt", "soma_radius", "collapse_soma", "collapse_function", "invalidation_d", "smooth_vertices", "compute_radius", "shape_function", "smooth_iterations", "smooth_neighborhood", "smooth_r", "cc_vertex_thresh", "remove_zero_length_edges", "collapse_params", ] def __init__(self, **kwargs): names = [f.name for f in fields(self)] if kwargs.get("meta") is not None: setattr( self, "meta", _metadata_from_dict(kwargs.pop("meta"), "MetaMetadata") ) for k, v in kwargs.items(): if k in names: if isinstance(v, np.ndarray): v = v.tolist() setattr(self, k, v) def skeletonize_kwargs(self): params = asdict(self) # reassemble soma point into list soma_pt = [ params.pop("soma_pt_x"), params.pop("soma_pt_y"), params.pop("soma_pt_z"), ] if soma_pt[0] is not None: params["soma_pt"] = soma_pt else: params["soma_pt"] = None for k in list(params.keys()): if k not in self._skeletonize_fields: params.pop(k) return params def update_metameta(self, metameta): if self.meta is not None: meta_dict = asdict(self.meta) else: meta_dict = {} meta_dict.update(metameta) setattr(self, "meta", _metadata_from_dict(meta_dict, "MetaMetadata")) pass class StaticSkeleton: def __init__( self, vertices, edges, root=None, radius=None, mesh_to_skel_map=None, mesh_index=None, vertex_properties=None, voxel_scaling=None, ): self._vertices = vertices self._edges = edges self._root = None self._radius = radius self._mesh_to_skel_map = mesh_to_skel_map self._mesh_index = mesh_index self._parent_node_array = None self._distance_to_root = None self._csgraph = None self._csgraph_binary = None self._voxel_scaling = voxel_scaling if root is None: self._create_default_root() else: self.reroot(root, reset_other_components=True) self._reset_derived_properties() self.vertex_properties = vertex_properties @property def vertices(self): return self._vertices @property def edges(self): return self._edges @property def mesh_to_skel_map(self): return self._mesh_to_skel_map @property def voxel_scaling(self): if self._voxel_scaling is None: return None else: return np.array(self._voxel_scaling) @voxel_scaling.setter def voxel_scaling(self, new_scaling): self._vertices = self._vertices * self.inverse_voxel_scaling if new_scaling is not None: self._vertices = self._vertices * np.array(new_scaling).reshape(3) self._voxel_scaling = new_scaling self._reset_derived_properties() @property def inverse_voxel_scaling(self): if self.voxel_scaling is 
None:
            return np.array([1, 1, 1])
        else:
            return 1 / self.voxel_scaling

    @property
    def n_vertices(self):
        """ int : Number of vertices in the skeleton """
        return len(self.vertices)

    @property
    def root(self):
        """ int : Index of the skeleton root """
        if self._root is None:
            self._create_default_root()
        return self._root

    @property
    def radius(self):
        if self._radius is None:
            return None
        else:
            return self._radius

    @radius.setter
    def radius(self, new_values):
        if len(new_values) == self.n_vertices:
            self._radius = np.array(new_values).reshape(self.n_vertices)

    @property
    def mesh_index(self):
        return self._mesh_index

    def _create_default_root(self):
        temp_graph = utils.create_csgraph(
            self.vertices, self.edges, euclidean_weight=True, directed=False
        )
        r = utils.find_far_points_graph(temp_graph)
        self.reroot(int(r[0]), reset_other_components=True)

    def reroot(self, new_root, reset_other_components=False):
        """Change the skeleton root index.

        Parameters
        ----------
        new_root : Int
            Skeleton vertex index to be the new root.
        reset_other_components : Bool
            Orders non-root components according to a local default "root".
            Should not often be set to True by a user.
        """
        # Valid vertex indices run from 0 to n_vertices - 1.
        if new_root >= self.n_vertices:
            raise ValueError("New root must correspond to a skeleton vertex index")
        self._root = int(new_root)
        self._parent_node_array = np.full(self.n_vertices, None)

        _, lbls = sparse.csgraph.connected_components(self.csgraph_binary)
        root_comp = lbls[new_root]
        if reset_other_components:
            comps_to_reroot = np.unique(lbls)
        else:
            comps_to_reroot = [root_comp]
        # The edge list has to be treated like an undirected graph
        for comp in comps_to_reroot:
            if comp == root_comp:
                comp_root = new_root
            else:
                comp_root = utils.find_far_points_graph(
                    self.csgraph_binary,
                    start_ind=np.flatnonzero(lbls == comp)[0],
                    multicomponent=True,
                )[0]

            d = sparse.csgraph.dijkstra(
                self.csgraph_binary, directed=False, indices=comp_root
            )

            # Make edges in edge list orient as [child, parent]
            # Where each child only has one parent
            # And the root has no parent. (Thus parent is closer than child)
            edge_slice = np.any(
                np.isin(self.edges, np.flatnonzero(lbls == comp)), axis=1
            )

            edges = self.edges[edge_slice]
            is_ordered = d[edges[:, 0]] > d[edges[:, 1]]
            e1 = np.where(is_ordered, edges[:, 0], edges[:, 1])
            e2 = np.where(is_ordered, edges[:, 1], edges[:, 0])
            self._edges[edge_slice] = np.stack((e1, e2)).T
            self._parent_node_array[e1] = e2

        self._reset_derived_properties()

    ######################
    # Derived properties #
    ######################

    def _reset_derived_properties(self):
        self._csgraph = None
        self._csgraph_binary = None
        self._distance_to_root = None

    @property
    def csgraph(self):
        if self._csgraph is None:
            self._csgraph = utils.create_csgraph(
                self.vertices, self.edges, euclidean_weight=True, directed=True
            )
        return self._csgraph

    @property
    def csgraph_binary(self):
        if self._csgraph_binary is None:
            self._csgraph_binary = utils.create_csgraph(
                self.vertices, self.edges, euclidean_weight=False, directed=True
            )
        return self._csgraph_binary

    @property
    def csgraph_undirected(self):
        return self.csgraph + self.csgraph.T

    @property
    def csgraph_binary_undirected(self):
        return self.csgraph_binary + self.csgraph_binary.T

    def parent_nodes(self, vinds):
        """Get a list of parent nodes for specified vertices

        Parameters
        ----------
        vinds : Collection of ints
            Collection of vertex indices

        Returns
        -------
        numpy.array
            The parent node of each vertex index in vinds.
""" if isinstance(vinds, list): vinds = np.array(vinds) return self._parent_node_array[vinds] @property def distance_to_root(self): """np.array : N length array with the distance to the root node along the skeleton.""" if self._distance_to_root is None: self._distance_to_root = sparse.csgraph.dijkstra( self.csgraph, directed=False, indices=self.root ) return self._distance_to_root def path_to_root(self, v_ind): """ Gives the path to root from a specified vertex. Parameters ---------- v_ind : int Vertex index Returns ------- numpy.array : Ordered set of indices from v_ind to root, inclusive of both. """ path = [v_ind] ind = v_ind ind = self._parent_node_array[ind] while ind is not None: path.append(ind) ind = self._parent_node_array[ind] return np.array(path) class Skeleton: def __init__( self, vertices, edges, root=None, radius=None, mesh_to_skel_map=None, mesh_index=None, vertex_properties={}, node_mask=None, voxel_scaling=None, remove_zero_length_edges=True, skeleton_index=None, meta={}, ): if remove_zero_length_edges: zlsk = utils.collapse_zero_length_edges( vertices, edges, root, radius, mesh_to_skel_map, mesh_index, node_mask, vertex_properties, ) ( vertices, edges, root, radius, mesh_to_skel_map, mesh_index, node_mask, vertex_properties, ) = zlsk self._rooted = StaticSkeleton( vertices, edges, radius=radius, mesh_to_skel_map=mesh_to_skel_map, mesh_index=mesh_index, vertex_properties=vertex_properties, root=root, voxel_scaling=voxel_scaling, ) self._node_mask = np.full(self._rooted.n_vertices, True) self._edges = None self._SkeletonIndex = skeleton_index # Derived properties of the filtered graph self._csgraph_filtered = None self._cover_paths = None self._segments = None self._segment_map = None self._kdtree = None self._pykdtree = None self._reset_derived_properties_filtered() self.vertex_properties = vertex_properties if isinstance(meta, SkeletonMetadata): self._meta = meta else: self._meta = SkeletonMetadata(**meta) if node_mask is not None: self.apply_mask(node_mask, in_place=True) @property def meta(self): return self._meta ################### # Mask properties # ################### @property def SkeletonIndex(self): if self._SkeletonIndex is None: self._SkeletonIndex = np.array return self._SkeletonIndex def _register_skeleton_index(self, NewSkeletonIndex): self._SkeletonIndex = NewSkeletonIndex @property def node_mask(self): return self._node_mask def copy(self): return Skeleton( self._rooted.vertices, self._rooted.edges, mesh_to_skel_map=self._rooted.mesh_to_skel_map, vertex_properties=self._rooted.vertex_properties, root=self._rooted.root, node_mask=self.node_mask, radius=self._rooted.radius, voxel_scaling=self.voxel_scaling, skeleton_index=self._SkeletonIndex, mesh_index=self._rooted.mesh_index, remove_zero_length_edges=False, meta=self.meta, ) def apply_mask(self, new_mask, in_place=False): if in_place: sk = self else: sk = self.copy() if len(new_mask) == len(sk.vertices): all_false = np.full(len(sk.node_mask), False) all_false[sk.node_mask] = new_mask new_mask = all_false sk._node_mask = new_mask sk._reset_derived_properties_filtered() if in_place is False: return sk def reset_mask(self, in_place=False): true_mask = np.full(self.unmasked_size, True) out = self.apply_mask(true_mask, in_place=in_place) if in_place is False: return out def mask_from_indices(self, mask_indices): new_mask = np.full(self._rooted.n_vertices, False) new_mask[self.map_indices_to_unmasked(mask_indices)] = True return new_mask @property def indices_unmasked(self): """ np.array: Gets the indices of 
nodes in the filtered mesh in the unmasked index array """ return np.flatnonzero(self.node_mask) @property def unmasked_size(self): return len(self._rooted.vertices) def map_indices_to_unmasked(self, unmapped_indices): """ For a set of masked indices, returns the corresponding unmasked indices Parameters ---------- unmapped_indices: np.array a set of indices in the masked index space Returns ------- np.array the indices mapped back to the original mesh index space """ return utils.map_indices_to_unmasked(self.indices_unmasked, unmapped_indices) def map_boolean_to_unmasked(self, unmapped_boolean): """ For a boolean index in the masked indices, returns the corresponding unmasked boolean index Parameters ---------- unmapped_boolean : np.array a bool array in the masked index space Returns ------- np.array a bool array in the original index space. Is True if the unmapped_boolean suggests it should be. """ return utils.map_boolean_to_unmasked( self.unmasked_size, self.node_mask, unmapped_boolean ) def filter_unmasked_boolean(self, unmasked_boolean): """ For an unmasked boolean slice, returns a boolean slice filtered to the masked mesh Parameters ---------- unmasked_boolean : np.array a bool array in the original mesh index space Returns ------- np.array returns the elements of unmasked_boolean that are still relevant in the masked index space """ return utils.filter_unmasked_boolean(self.node_mask, unmasked_boolean) def filter_unmasked_indices(self, unmasked_shape, mask=None): """ filters a set of indices in the original mesh space and returns it in the masked space Parameters ---------- unmasked_shape: np.array a set of indices into vertices in the unmasked index space mask: np.array or None the mask to apply. default None will use this Mesh node_mask Returns ------- np.array the unmasked_shape indices mapped into the masked index space """ if mask is None: mask = self.node_mask return utils.filter_unmasked_indices(mask, unmasked_shape) def filter_unmasked_indices_padded(self, unmasked_shape, mask=None): """ filters a set of indices in the original mesh space and returns it in the masked space Parameters ---------- unmasked_shape: np.array a set of indices into vertices in the unmasked index space mask: np.array or None the mask to apply. default None will use this Mesh node_mask Returns ------- np.array the unmasked_shape indices mapped into the masked index space, with -1 where the original index did not map into the masked mesh. 
""" if mask is None: mask = self.node_mask return utils.filter_unmasked_indices_padded(mask, unmasked_shape) #################### # Basic properties # #################### @property def vertices(self): if self._vertices is None: self._vertices = self._rooted.vertices[self.node_mask] return self._vertices @vertices.setter def vertices(self, new_vertices): new_vertices = np.atleast_2d(new_vertices) if new_vertices.shape[1] != 3: raise ValueError("New vertices must be 3 dimensional") if len(new_vertices) == self._rooted.n_vertices: self._rooted._vertices = new_vertices elif len(new_vertices) == self.n_vertices: self._rooted._vertices[self.node_mask] = new_vertices else: raise ValueError("New vertices must be the same size as existing vertices") self._reset_derived_properties_rooted() self._reset_derived_properties_filtered(index_changed=False) @property def edges(self): if self._edges is None: self._edges = self.filter_unmasked_indices(self._rooted.edges) return self._edges @property def n_vertices(self): """ int : Number of vertices in the skeleton """ return len(self.vertices) @property def mesh_to_skel_map(self): """numpy.array : N_mesh length array giving the associated skeleton vertex for each mesh vertex""" if self._rooted.mesh_to_skel_map is None: return None else: return self.filter_unmasked_indices_padded(self._rooted.mesh_to_skel_map) @property def mesh_to_skel_map_base(self): """numpy.array : N_mesh length array giving the associated skeleton vertex for each mesh vertex""" if self._rooted.mesh_to_skel_map is None: return None else: return self._rooted.mesh_to_skel_map @property def radius(self): if self._rooted.radius is None: return None return self._rooted.radius[self.node_mask] @property def mesh_index(self): if self._rooted.mesh_index is None: return None return self._rooted.mesh_index[self.node_mask] @property def csgraph(self): return self._rooted.csgraph[:, self.node_mask][self.node_mask] @property def csgraph_binary(self): return self._rooted.csgraph_binary[:, self.node_mask][self.node_mask] @property def csgraph_undirected(self): return self._rooted.csgraph_undirected[:, self.node_mask][self.node_mask] @property def csgraph_binary_undirected(self): return self._rooted.csgraph_binary_undirected[:, self.node_mask][self.node_mask] ################## # Voxel scalings # ################## @property def voxel_scaling(self): return self._rooted.voxel_scaling @voxel_scaling.setter def voxel_scaling(self, new_scaling): self._rooted.voxel_scaling = new_scaling self._reset_derived_properties_filtered(index_changed=False) ##################### # Rooted properties # ##################### def _reset_derived_properties_rooted(self): self._rooted._reset_derived_properties() def _create_default_root(self): temp_graph = utils.create_csgraph( self._rooted.vertices, self._rooted.edges, euclidean_weight=True, directed=False, ) r = utils.find_far_points_graph(temp_graph) self._rooted.reroot(int(r[0])) @property def root(self): return self.SkeletonIndex( self.filter_unmasked_indices_padded(self._rooted.root) ) @property def root_position(self): return self._rooted.vertices[self._rooted.root] def reroot(self, new_root): self._rooted.reroot(self.map_indices_to_unmasked(new_root)) self._reset_derived_properties_filtered() @property def distance_to_root(self): "Distance to root (even if root is not in the mask)" return self._rooted.distance_to_root[self.node_mask] def path_to_root(self, v_ind): "Path stops if it leaves masked region" path_b = 
self._rooted.path_to_root(self.map_indices_to_unmasked(v_ind))
        path_filt = self.filter_unmasked_indices_padded(path_b)
        if np.any(path_filt == -1):
            last_ind = np.flatnonzero(path_filt == -1)[0]
        else:
            last_ind = len(path_filt)
        return self.SkeletonIndex(path_filt[:last_ind])

    #######################
    # Filtered properties #
    #######################

    def _reset_derived_properties_filtered(self, index_changed=True):
        self._vertices = None
        self._edges = None
        self._kdtree = None
        self._pykdtree = None

        if index_changed:
            self._branch_points = None
            self._end_points = None
            self._segment_map = None
            self._SkeletonIndex = None
            self._cover_paths = None

    ########################
    # Geometric quantities #
    ########################

    @property
    def kdtree(self):
        """ scipy.spatial.kdtree : k-D tree from scipy.spatial. """
        if self._kdtree is None:
            self._kdtree = spatial.cKDTree(self.vertices)
        return self._kdtree

    @property
    def pykdtree(self):
        if self._pykdtree is None:
            self._pykdtree = pyKDTree(self.vertices)
        return self._pykdtree

    def _single_path_length(self, path):
        """Compute the length of a single path (assumed to be correct)"""
        path = np.unique(path)
        return np.sum(self.csgraph[:, path][path])

    def path_length(self, paths=None):
        """Returns the length of a path (described as an ordered collection
        of connected indices)

        Parameters
        ----------
        path : Path or collection of paths, a path being an ordered list of
            linked vertex indices.

        Returns
        -------
        Float or list of floats : The length of each path.
        """
        if paths is None:
            paths = np.arange(self.n_vertices)

        if len(paths) == 0:
            return 0

        if isinstance(paths[0], Iterable):
            Ls = []
            for path in paths:
                Ls.append(self._single_path_length(path))
        else:
            Ls = self._single_path_length(paths)
        return Ls

    ################################
    # Topological split properties #
    ################################

    def _create_branch_and_end_points(self):
        """Pre-compute branch and end points from the graph"""
        n_children = np.sum(self.csgraph_binary > 0, axis=0).squeeze()
        self._branch_points = np.flatnonzero(n_children > 1)
        self._end_points = np.flatnonzero(n_children == 0)

    @property
    def branch_points(self):
        """ numpy.array : Indices of branch points on the skeleton (potentially including root)"""
        if self._branch_points is None:
            self._create_branch_and_end_points()
        return self.SkeletonIndex(self._branch_points)

    @property
    def end_points(self):
        """ numpy.array : Indices of end points on the skeleton (potentially including root)"""
        if self._end_points is None:
            self._create_branch_and_end_points()
        return self.SkeletonIndex(self._end_points)

    @property
    def topo_points(self):
        return self.SkeletonIndex(
np.concatenate([self.end_points, self.branch_points, [self.root]])
numpy.concatenate
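A compact sketch of the topo_points construction the completion finishes: end points, branch points, and the root are merged into one index array. Indices are invented.

import numpy as np

end_points    = np.array([4, 9])   # hypothetical skeleton indices
branch_points = np.array([2])
root          = 0
topo = np.concatenate([end_points, branch_points, [root]])
print(topo)                        # -> [4 9 2 0]  (mixed inputs are coerced to one array)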
#!/usr/bin/python # -*- coding: utf-8 -*- """Random vibration theory (RVT) based motions.""" import numpy as np from scipy.stats import linregress from scipy.interpolate import interp1d from . import peak_calculators DEFAULT_CALC = 'V75' def sort_increasing(*args): """Sort arrays such that they are increasing. Check if the first array is is increasing, if not reverse the order. Same operation is applied to additional arrays. Parameters ---------- args : array_like arrays to be re-ordered. Returns ------- tuple tuple containing sorted :class:`numpy.ndarray`'s. Raises ------ :class:`NotImplementedError` If first array is not monotonic. """ diffs = np.diff(args[0]) if np.all(diffs >= 0): # All increasing, do nothing pass elif np.all(diffs <= 0): # All decreasing, reverse args = [a[::-1] for a in args] else: raise NotImplementedError('Values are not regularly ordered.') return args def log_spaced_values(lower, upper, per_decade=512): """Generate values with constant log-spacing. Parameters ---------- lower : float lower end of the range. upper : float upper end of the range. per_decade : int, optional number of points per decade. Default is 512 points per decade. Returns ------- values : :class:`numpy.ndarray` Log-spaced values. """ lower = np.log10(lower) upper = np.log10(upper) count = np.ceil(per_decade * (upper - lower)) return np.logspace(lower, upper, count) def calc_sdof_tf(freqs, osc_freq, osc_damping): """Single-degree-of-freedom transfer function. When applied on the acceleration Fourier amplitude spectrum, it provides the pseudo-spectral acceleration. Parameters ---------- freqs : array_like Frequencies at which the transfer function should be calculated (Hz). osc_freq : float Frequency of the oscillator (Hz). osc_damping : float Fractional damping of the oscillator (decimal). Returns ------- :class:`numpy.ndarray` Complex valued transfer function. """ freqs = np.asarray(freqs) return ( -osc_freq ** 2. / (freqs ** 2 - osc_freq ** 2 - 2.j * osc_damping * osc_freq * freqs)) def calc_stress_drop(magnitude): """Stress drop using Atkinson & Boore (2011, :cite:`atkinson11`) model. Parameters ---------- magnitude : float Moment magnitude of the stress drop. Returns ------- stress_drop : float Stress drop (bars). """ return 10 ** (3.45 - 0.2 * max(magnitude, 5.)) def calc_geometric_spreading(dist, params): """Geometric spreading defined by piece-wise linear model. Parameters ---------- dist : float Closest distance to the rupture surface (km). params : List[(float,Optional[float])] List of (slope, limit) tuples that define the attenuation. For an infinite distance use `None`. For example, [(1, `None`)] would provide for 1/R geometric spreading to an infinite distance. Returns ------- coeff : float Geometric spreading coefficient. """ initial = 1 coeff = 1 for slope, limit in params: # Compute the distance limited by the maximum distance of the slope. _dist = min(dist, limit) if limit else dist coeff *= (initial / _dist) ** slope if _dist < dist: initial = _dist else: break return coeff class RvtMotion(object): """Random vibration theory motion. Parameters ---------- freqs : array_like, optional Frequency array (Hz). fourier_amps : array_like, optional Absolute value of acceleration Fourier amplitudes. duration : float, optional Ground motion duration (sec). peak_calculator : :class:`~.peak_calculators.Calculator`, optional Peak calculator to use. If `None`, then the default peak calculator is used. 
The peak calculator may either be specified by a :class:`~.peak_calculators.Calculator` object, or by the initials of the calculator using :func:`~.peak_calculators.peak_calculator`. calc_kwds : dict, optional Keywords to be passed during the creation the peak calculator. These keywords are only required for some peak calculators. """ def __init__(self, freqs=None, fourier_amps=None, duration=None, peak_calculator=None, calc_kwds=None): """Initialize the class.""" self._freqs = freqs self._fourier_amps = fourier_amps self._duration = duration if self._freqs is not None: self._freqs, self._fourier_amps = sort_increasing( self._freqs, self._fourier_amps) if isinstance(peak_calculator, peak_calculators.Calculator): self.peak_calculator = peak_calculator else: self.peak_calculator = peak_calculators.get_peak_calculator( peak_calculator or DEFAULT_CALC, calc_kwds) @property def freqs(self): """Frequency values (Hz).""" return self._freqs @property def fourier_amps(self): """Acceleration Fourier amplitude values (g-sec).""" return self._fourier_amps @property def duration(self): """Duration of the ground motion for RVT analysis.""" return self._duration def calc_osc_accels(self, osc_freqs, osc_damping=0.05, trans_func=None): """Pseudo-acceleration spectral response of an oscillator. Parameters ---------- osc_freq : float Frequency of the oscillator (Hz). osc_damping : float Fractional damping of the oscillator (dec). For example, 0.05 for a damping ratio of 5%. trans_func : array_like, optional Transfer function to be applied to motion prior calculation of the oscillator response. Returns ------- spec_accels : :class:`numpy.ndarray` Peak pseudo-spectral acceleration of the oscillator """ if trans_func is None: trans_func = np.ones_like(self.freqs) else: trans_func = np.asarray(trans_func) resp = np.array([ self.calc_peak(trans_func * calc_sdof_tf(self.freqs, of, osc_damping), of, osc_damping) for of in osc_freqs ]) return resp def calc_peak(self, transfer_func=None, osc_freq=None, osc_damping=None): """Compute the peak response. Parameters ---------- transfer_func : array_like, optional Transfer function to apply to the motion. If ``None``, then no transfer function is applied. osc_freq : float Frequency of the oscillator (Hz). osc_damping : float Fractional damping of the oscillator (dec). For example, 0.05 for a damping ratio of 5%. Returns ------- peak : float Calculated peak """ if transfer_func is None: fourier_amps = self._fourier_amps else: fourier_amps = np.abs(transfer_func) * self._fourier_amps return self.peak_calculator( self._duration, self._freqs, fourier_amps, osc_freq=osc_freq, osc_damping=osc_damping)[0] def calc_attenuation(self, min_freq, max_freq=None): r"""Compute the site attenuation (κ) based on a log-linear fit. Parameters ---------- min_freq : float minimum frequency of the fit (Hz). max_freq : float, optional maximum frequency of the fit. If ``None``, then the maximum frequency range is used. Returns ------- atten : float attenuation parameter. r_sqr : float squared correlation coefficient of the fit (R²). See :func:`scipy.stats.linregress`. freqs : :class:`numpy.ndarray` selected frequencies fitted : :class:`numpy.ndarray` fitted values Notes ----- This function computes the site attenuation defined by Anderson & Hough (1984, :cite:`anderson84`) as: .. 
        .. math:: a(f) = A_0 \exp(-\pi \kappa f) \quad \text{for } f > f_E

        for a single Fourier amplitude spectrum
        """
        max_freq = max_freq or self.freqs[-1]
        mask = (min_freq <= self.freqs) & (self.freqs <= max_freq)

        slope, intercept, r_value, p_value, stderr = linregress(
            self.freqs[mask], np.log(self.fourier_amps[mask]))

        atten = slope / -np.pi
        freqs = self.freqs[mask]
        fitted = np.exp(intercept + slope * freqs)
        return atten, r_value ** 2, freqs, fitted


class SourceTheoryMotion(RvtMotion):
    """Single-corner source theory model.

    The single-corner source theory model uses default parameters from
    Campbell (2003, :cite:`campbell03`).
    """

    def __init__(self,
                 magnitude,
                 distance,
                 region,
                 stress_drop=None,
                 depth=8,
                 peak_calculator=None,
                 calc_kwds=None):
        """Initialize the motion.

        Parameters
        ----------
        magnitude : float
            Moment magnitude of the event.
        distance : float
            Epicentral distance (km).
        region : str
            Region for the parameters. Either 'cena' for Central and Eastern
            North America, or 'wna' for Western North America.
        stress_drop : float, optional
            Stress drop of the event (bars). If `None`, then the default
            value is used. For `region` is 'cena', the default value is
            computed by the :cite:`atkinson11` model, while for `region` is
            'wna' the default value is 100 bars.
        depth : float, optional
            Hypocenter depth (km). The `depth` is combined with the
            `distance` to compute the hypocentral distance.
        peak_calculator : :class:`~.peak_calculators.Calculator`, optional
            Peak calculator to use. If `None`, then the default peak
            calculator is used. The peak calculator may either be specified
            by a :class:`~.peak_calculators.Calculator` object, or by the
            initials of the calculator using
            :func:`~.peak_calculators.get_peak_calculator`.
        calc_kwds : dict, optional
            Keywords to be passed during the creation of the peak
            calculator. These keywords are only required for some peak
            calculators.
        """
        super().__init__(peak_calculator=peak_calculator, calc_kwds=calc_kwds)

        self.magnitude = magnitude
        self.distance = distance
        self.region = peak_calculators.get_region(region)

        if self.region == 'wna':
            # Default parameters for the WUS from Campbell (2003)
            self.shear_velocity = 3.5
            self.path_atten_coeff = 180.
            self.path_atten_power = 0.45
            self.density = 2.8
            self.site_atten = 0.04

            self.geometric_spreading = [(1, 40), (0.5, None)]

            if stress_drop:
                self.stress_drop = stress_drop
            else:
                self.stress_drop = 100.

            # Crustal amplification from Campbell (2003) using the
            # log-frequency and the amplification based on a quarter-wave
            # length approximation
            self.site_amp = interp1d(
                np.log([
                    0.01, 0.09, 0.16, 0.51, 0.84, 1.25, 2.26, 3.17, 6.05,
                    16.60, 61.20, 100.00
                ]), [
                    1.00, 1.10, 1.18, 1.42, 1.58, 1.74, 2.06, 2.25, 2.58,
                    3.13, 4.00, 4.40
                ],
                bounds_error=False)
        elif self.region == 'cena':
            # Default parameters for the CEUS from Campbell (2003)
            self.shear_velocity = 3.6
            self.density = 2.8
            self.path_atten_coeff = 680.
            self.path_atten_power = 0.36
            self.site_atten = 0.006

            self.geometric_spreading = [(1, 70), (0, 130), (0.5, None)]

            if stress_drop:
                self.stress_drop = stress_drop
            else:
                self.stress_drop = calc_stress_drop(magnitude)

            # Crustal amplification from Campbell (2003) using the
            # log-frequency and the amplification based on a quarter-wave
            # length approximation
            self.site_amp = interp1d(
                np.log([
                    0.01, 0.10, 0.20, 0.30, 0.50, 0.90, 1.25, 1.80, 3.00,
                    5.30, 8.00, 14.00, 30.00, 60.00, 100.00
                ]), [
                    1.00, 1.02, 1.03, 1.05, 1.07, 1.09, 1.11, 1.12, 1.13,
                    1.14, 1.15, 1.15, 1.15, 1.15, 1.15
                ],
                bounds_error=False)
        else:
            raise NotImplementedError

        # Combine the epicentral distance and the hypocenter depth into the
        # hypocentral distance
        self.depth = depth
        self.hypo_distance = np.sqrt(self.distance ** 2. + self.depth ** 2.)

        # Constants
        self.seismic_moment = 10. ** (1.5 * (self.magnitude + 10.7))
        self.corner_freq = (4.9e6 * self.shear_velocity *
                            (self.stress_drop / self.seismic_moment) **
                            (1. / 3.))

    def calc_duration(self):
        """Compute the duration by combination of source and path.

        Returns
        -------
        duration : float
            Computed duration
        """
        # Source component
        duration_source = 1. / self.corner_freq

        # Path component
        if self.region == 'wna':
            duration_path = 0.05 * self.hypo_distance
        elif self.region == 'cena':
            duration_path = 0.

            if self.hypo_distance > 10:
                # 10 < R <= 70 km
                duration_path += 0.16 * (min(self.hypo_distance, 70) - 10.)

            if self.hypo_distance > 70:
                # 70 < R <= 130 km
                duration_path += -0.03 * (min(self.hypo_distance, 130) - 70.)

            if self.hypo_distance > 130:
                # 130 km < R
                duration_path += 0.04 * (self.hypo_distance - 130.)
        else:
            raise NotImplementedError

        return duration_source + duration_path

    def calc_fourier_amps(self, freqs=None):
        """Compute the acceleration Fourier amplitudes for a frequency range.

        Parameters
        ----------
        freqs : array_like, optional
            Frequency range. If no frequency range is specified then
            :func:`log_spaced_values(0.05, 200.)` is used.

        Returns
        -------
        fourier_amps : :class:`numpy.ndarray`
            acceleration Fourier amplitudes
        """
        if freqs is None:
            self._freqs = log_spaced_values(0.05, 200.)
        else:
            self._freqs, = sort_increasing(np.asarray(freqs))

        self._duration = self.calc_duration()

        # Source component (single-corner source spectrum)
        const = (0.55 * 2.) / (np.sqrt(2.) * 4. * np.pi * self.density *
                               self.shear_velocity ** 3.)
        source_comp = (const * self.seismic_moment /
                       (1. + (self._freqs / self.corner_freq) ** 2.))

        # Path component
        path_atten = (self.path_atten_coeff * self._freqs **
                      self.path_atten_power)
        geo_atten = calc_geometric_spreading(self.hypo_distance,
                                             self.geometric_spreading)

        path_comp = geo_atten * np.exp(
            (-np.pi * self._freqs * self.hypo_distance) /
            (path_atten * self.shear_velocity))

        # Site component
        site_dim = np.exp(-np.pi * self.site_atten * self._freqs)

        ln_freqs = np.log(self._freqs)
        site_amp = self.site_amp(ln_freqs)
        if np.any(np.isnan(site_amp)):
            # Need to extrapolate
            mask = ln_freqs < self.site_amp.x[0]
            site_amp[mask] = self.site_amp.y[0]

            mask = self.site_amp.x[-1] < ln_freqs
            site_amp[mask] = self.site_amp.y[-1]

        site_comp = site_amp * site_dim

        # Conversion factor to convert from dyne-cm into gravity-sec
        conv = 1.e-20 / 980.7
        # Combine the three components and convert from displacement to
        # acceleration
        self._fourier_amps = (conv * (2. * np.pi * self._freqs) ** 2. *
                              source_comp * path_comp * site_comp)


class CompatibleRvtMotion(RvtMotion):
    """Response spectrum compatible RVT motion.

    A :class:`~.motions.CompatibleRvtMotion` object is used to compute a
    Fourier amplitude spectrum that is compatible with a target response
    spectrum.
""" def __init__(self, osc_freqs, osc_accels_target, duration=None, osc_damping=0.05, event_kwds=None, window_len=None, peak_calculator=None, calc_kwds=None): """Initialize the motion. Parameters ---------- osc_freqs : array_like Frequencies of the oscillator response (Hz). osc_accels_target : :class:`numpy.ndarray` Spectral acceleration of the oscillator at the specified frequencies (g). duration : float, optional Duration of the ground motion (sec). If `None`, then the duration is computed using the `event_kwds`. osc_damping : float, optional Fractional damping of the oscillator (dec). Default value is 0.05 for a damping ratio of 5%. event_kwds : Dict, optional Keywords passed to :class:`~.motions.SourceTheoryMotion` and used to compute the duration of the motion. Either `duration` or `event_kwds` should be specified. window_len : int, optional Window length used for smoothing the computed Fourier amplitude spectrum. If `None`, then no smoothing is applied. The smoothing is applied as a moving average with a width of `window_len`. peak_calculator : :class:`~.peak_calculators.Calculator`, optional Peak calculator to use. If `None`, then the default peak calculator is used. The peak calculator may either be specified by a :class:`~.peak_calculators.Calculator` object, or by the initials of the calculator using :func:`~.peak_calculators.peak_calculator`. calc_kwds : dict, optional Keywords to be passed during the creation the peak calculator. These keywords are only required for some peak calculators. """ super().__init__(peak_calculator=peak_calculator) osc_freqs, osc_accels_target = sort_increasing( np.asarray(osc_freqs), np.asarray(osc_accels_target)) if duration: self._duration = duration else: stm = SourceTheoryMotion(**event_kwds) self._duration = stm.calc_duration() fourier_amps = self._estimate_fourier_amps( osc_freqs, osc_accels_target, osc_damping) # The frequency needs to be extended to account for the fact that the # oscillator transfer function has a width. The number of frequencies # depends on the range of frequencies provided. self._freqs = log_spaced_values(osc_freqs[0] / 2., 2. 
        self._fourier_amps = np.empty_like(self._freqs)

        # Indices of the first and last point within the range of the
        # provided response spectra
        indices = np.argwhere((osc_freqs[0] < self._freqs) &
                              (self._freqs < osc_freqs[-1]))
        first = indices[0, 0]
        # last is extended one past the usable range to allow use of
        # first:last notation
        last = indices[-1, 0] + 1

        log_freqs = np.log(self._freqs)
        log_osc_freqs = np.log(osc_freqs)

        self._fourier_amps[first:last] = np.exp(
            np.interp(log_freqs[first:last], log_osc_freqs,
                      np.log(fourier_amps)))

        def extrapolate():
            """Extrapolate the first and last value of FAS."""

            def _extrap(freq, freqs, fourier_amps, max_slope=None):
                # Extrapolation is performed in log-space using the first and
                # last two points
                xi = np.log(freq)
                x = np.log(freqs)
                y = np.log(fourier_amps)

                slope = (y[1] - y[0]) / (x[1] - x[0])

                if max_slope:
                    slope = min(slope, max_slope)

                return np.exp(slope * (xi - x[0]) + y[0])

            # Extrapolate below the fitted range using the first two points
            # within it
            self._fourier_amps[0:first] = _extrap(
                self._freqs[0:first], self._freqs[first:first + 2],
                self._fourier_amps[first:first + 2], None)
            # Extrapolate above the fitted range using the last two points
            # within it
            self._fourier_amps[last:] = _extrap(
                self._freqs[last:], self._freqs[last - 2:last],
                self._fourier_amps[last - 2:last], None)

        extrapolate()

        # Apply a ratio correction between the computed and target response
        # spectra
        self.iterations = 0
        self.rmse = 1.

        max_iterations = 30
        tolerance = 5e-6

        osc_accels = self.calc_osc_accels(osc_freqs, osc_damping)

        # Smoothing operator
        if window_len:
            window =
np.ones(window_len, 'd')
numpy.ones
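# The record above is cut off mid-statement ("window ="), with
# np.ones(window_len, 'd') recorded as the completion. A minimal sketch of
# the moving-average smoothing such a boxcar window is typically used for;
# this is an assumption about the elided code, not the author's exact lines.
import numpy as np

def smooth_boxcar(values, window_len):
    """Smooth values with a normalized boxcar (moving average) window."""
    window = np.ones(window_len, 'd')   # flat float64 window of ones
    window /= window.sum()              # normalize so amplitudes are preserved
    # mode='same' keeps the output the same length as the input
    return np.convolve(values, window, mode='same')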
import argparse import os from importlib import import_module from abp.configs import NetworkConfig, ReinforceConfig, EvaluationConfig from abp.examples.pysc2.tug_of_war.device_handler import get_default_device, to_device, DeviceDataLoader import torch import numpy as np import torch.nn as nn from abp.adaptives import TransAdaptive from torch.utils.data.sampler import SubsetRandomSampler from torch.utils.data.dataloader import DataLoader from torch.optim import Adam def pre_process(data, output_indexes, output_shape): np_data = np.array(data) data_input =
np.stack(np_data[:,0])
numpy.stack
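# A small sketch of what np.stack(np_data[:, 0]) computes when each row's
# first column holds an equal-shaped array (an assumed data layout, with
# hypothetical (state, label) pairs):
import numpy as np

rows = [(np.zeros(3), 0), (np.ones(3), 1)]
np_data = np.array(rows, dtype=object)     # 2x2 object array
data_input = np.stack(np_data[:, 0])       # stacks the arrays -> shape (2, 3)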
"""General-purpose training script for image-to-image translation. This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). It first creates model, dataset, and visualizer given the option. It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. The script supports continue/resume training. Use '--continue_train' to resume your previous training. Example: Train a CycleGAN model: python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan Train a pix2pix model: python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA See options/base_options.py and options/train_options.py for more training options. See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md """ import time from options.train_options import TrainOptions from data import create_dataset from models import create_model from util.visualizer import Visualizer import util.util import numpy as np import shutil from torch.utils.tensorboard import SummaryWriter import matplotlib.pyplot as plt import os from help_rename import * if __name__ == '__main__': x_epoch = [] y_fid = [] y_kid_mean = [] y_kid_std = [] y_is_mean = [] y_is_std = [] opt = TrainOptions().parse() # get training options dir_tensorboard = './tensorboard/' + opt.name os.makedirs(dir_tensorboard, exist_ok=True) writer = SummaryWriter('./tensorboard/' + opt.name) dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options dataset_size = len(dataset) # get the number of images in the dataset. print('The number of training images = %d' % dataset_size) model = create_model(opt) # create a model given opt.model and other options model.setup(opt) # regular setup: load and print networks; create schedulers visualizer = Visualizer(opt) # create a visualizer that display/save images and plots total_iters = 0 # the total number of training iterations for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq> # Gilli & Rotem are having FUN if opt.model == 'cycle_gan': dir_img_train = f'./results/train_img_F/{epoch}' os.makedirs(dir_img_train, exist_ok=True) epoch_start_time = time.time() # timer for entire epoch iter_data_time = time.time() # timer for data loading per iteration epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch model.update_learning_rate() # update learning rates in the beginning of every epoch. 
        for i, data in enumerate(dataset):  # inner loop within one epoch
            iter_start_time = time.time()   # timer for computation per iteration
            if total_iters % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time

            total_iters += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)           # unpack data from dataset and apply preprocessing
            model.optimize_parameters()     # calculate loss functions, get gradients, update network weights

            if total_iters % opt.display_freq == 0:    # display images on visdom and save images to an HTML file
                save_result = total_iters % opt.update_html_freq == 0
                model.compute_visuals()
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

            if total_iters % opt.print_freq == 0:      # print training losses and save logging information to the disk
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
                for item in losses:
                    writer.add_scalar(item, losses[item], total_iters)

            if total_iters % opt.save_latest_freq == 0:    # cache our latest model every <save_latest_freq> iterations
                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
                model.save_networks(save_suffix)

            iter_data_time = time.time()

            # Gilli & Rotem are having FUN
            # save images to the disk
            if opt.model == 'cycle_gan' and epoch_iter % 10 == 0:
                for label, image in model.get_current_visuals().items():
                    if label == 'fake_B':
                        image_numpy = util.util.tensor2im(image)
                        img_path = os.path.join(dir_img_train, 'epoch%.3d_iter%.4d_%s.png' % (epoch, epoch_iter, label))
                        util.util.save_image(image_numpy, img_path)

        # Gilli & Rotem are having FUN
        # Metric calcs
        if opt.model == 'cycle_gan':
            metric_dict = calculate_metrics(dir_img_train, './datasets/XR2DRR/trainB', cuda=True, isc=True, fid=True,
                                            kid=True, verbose=False, kid_subset_size=250)
            print(metric_dict)
            x_epoch.append(epoch)
            y_fid.append(metric_dict['frechet_inception_distance'])
            writer.add_scalar('fid', metric_dict['frechet_inception_distance'], epoch)
            y_kid_mean.append(metric_dict['kernel_inception_distance_mean'])
            writer.add_scalar('kid_mean', metric_dict['kernel_inception_distance_mean'], epoch)
            y_kid_std.append(metric_dict['kernel_inception_distance_std'])
            writer.add_scalar('kid_std', metric_dict['kernel_inception_distance_std'], epoch)
            y_is_std.append(metric_dict['inception_score_std'])
            writer.add_scalar('is_std', metric_dict['inception_score_std'], epoch)
            y_is_mean.append(metric_dict['inception_score_mean'])
            writer.add_scalar('is_mean', metric_dict['inception_score_mean'], epoch)
            shutil.rmtree(dir_img_train)

        if epoch % opt.save_epoch_freq == 0:    # cache our model every <save_epoch_freq> epochs
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
            model.save_networks('latest')
            model.save_networks(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))

    # Gilli & Rotem are having FUN
    # Metric save
    if opt.model == 'cycle_gan':
        x_epoch_np =
np.array(x_epoch)
numpy.array
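# A hedged sketch of the metric-saving step this record leads into:
# converting the accumulated Python lists to numpy arrays so they can be
# plotted and written to disk (values below are hypothetical):
import numpy as np
import matplotlib.pyplot as plt

x_epoch, y_fid = [1, 2, 3], [210.0, 180.5, 150.2]
x_epoch_np = np.array(x_epoch)
y_fid_np = np.array(y_fid)
plt.plot(x_epoch_np, y_fid_np)
plt.savefig('fid_vs_epoch.png')
np.savetxt('fid_vs_epoch.csv', np.column_stack((x_epoch_np, y_fid_np)), delimiter=',')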
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from PIL import ImageGrab
import cv2
import numpy as np
from scipy.spatial import distance

# Set option
threshold = 0.7
input_size = 416
left_top_x = 200
left_top_y = 200
detect_width = 1280
detect_height = 720

# Set path
model_path = 'models/'

# Set file name
class_info = 'obj.names'
model = 'yolov4-0613'  # model to use: 'yolov4-first' or 'yolov4-0613'

# Variables
weights = model_path + model

saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']

Five = False


def find_nearest(tables, point):
    nearest_index = distance.cdist([point], tables).argmin()
    return nearest_index  # return the index of the nearest point


def read_class_names(class_file_name):
    names = {}
    with open(class_file_name, 'r') as data:
        for ID, name in enumerate(data):
            names[ID] = name.strip('\n')
    return names


def draw_bbox(s_image, bboxes, classes_name=None, show_label=True, five=False):
    if classes_name is None:
        classes_name = read_class_names(class_info)
    num_classes = len(classes_name)
    image_h, image_w, _ = s_image.shape
    colors = [[255, 128, 0], [128, 255, 128]]

    people_coords = []
    table_coords = []

    out_boxes, out_scores, out_classes, num_boxes = bboxes
    classes_cnt = [0] * num_classes
    table_cnt = 1
    for i in range(num_boxes[0]):
        if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) > num_classes:
            continue
        coor = out_boxes[0][i]
        coor[0] = int(coor[0] * image_h)
        coor[2] = int(coor[2] * image_h)
        coor[1] = int(coor[1] * image_w)
        coor[3] = int(coor[3] * image_w)

        fontScale = 0.5
        score = out_scores[0][i]
        class_ind = int(out_classes[0][i])
        bbox_color = colors[class_ind]
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[1], coor[0]), (coor[3], coor[2])

        # count detections per class
        print(classes_name[class_ind])
        classes_cnt[class_ind] += 1

        # print("left_top : ", c1, ", right_bottom: ", c2)

        # compute the x, y coordinates of the box center
        center_x = int((c1[0] + c2[0]) / 2)
        center_y = int((c1[1] + c2[1]) / 2)
        print("x: ", center_x, ", y: ", center_y)

        # store the center coordinates per class
        if classes_name[class_ind] == "Person":
            people_coords.append([center_x, center_y])
        elif classes_name[class_ind] == "Table":
            table_coords.append([center_x, center_y])
        print()

        # boxing object
        cv2.rectangle(s_image, c1, c2, bbox_color, bbox_thick)

        if show_label:
            if classes_name[class_ind] == 'Table':
                bbox_mess = '%s_%d: %.2f' % (classes_name[class_ind], table_cnt, score)
                table_cnt += 1
            else:
                bbox_mess = '%s: %.2f' % (classes_name[class_ind], score)
            t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
            c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
            cv2.rectangle(s_image, c1, (
np.float32(c3[0])
numpy.float32
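# A minimal, self-contained sketch of the find_nearest helper defined above,
# showing how scipy's cdist maps a detected person to the closest table
# center (coordinates are hypothetical):
import numpy as np
from scipy.spatial import distance

tables = np.array([[100, 100], [400, 120], [250, 300]])
person = [260, 280]
nearest_index = distance.cdist([person], tables).argmin()
# nearest_index == 2 here, since [250, 300] is the closest center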
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2021 InferStat Ltd
# Created by: <NAME>
# Created date: 11/03/2021

"""
Allocation algorithms are functions used to compute allocations - % of your portfolio or maximum investment size to
invest in a market or asset.
"""

# External packages
import numpy as np
import pandas as pd
import inspect
import os
from typing import List, Callable, Dict

# InferStat packages
from infertrade.PandasEnum import PandasEnum
import infertrade.utilities.operations as operations
import infertrade.algos.community.signals as signals
from infertrade.algos.community.permalinks import data_dictionary


def fifty_fifty(dataframe) -> pd.DataFrame:
    """Allocates 50% of strategy budget to asset, 50% to cash."""
    dataframe["allocation"] = 0.5
    return dataframe


def buy_and_hold(dataframe: pd.DataFrame) -> pd.DataFrame:
    """Allocates 100% of strategy budget to asset, holding to end of period (or security bankruptcy)."""
    dataframe[PandasEnum.ALLOCATION.value] = 1.0
    return dataframe


def chande_kroll_crossover_strategy(dataframe: pd.DataFrame,) -> pd.DataFrame:
    """
    This simple all-or-nothing rule:
    (1) allocates 100% of the portfolio to a long position on the asset when the price of the asset is above both the
    Chande Kroll stop long line and Chande Kroll stop short line, and
    (2) according to the value set for the allow_short_selling parameter, either allocates 0% of the portfolio to the
    asset or allocates 100% of the portfolio to a short position on the asset when the price of the asset is below
    both the Chande Kroll stop long line and the Chande Kroll stop short line.
    """
    # Calculate the Chande Kroll lines, which will be added to the DataFrame as columns named "chande_kroll_long" and
    # "chande_kroll_short".
    dataframe = signals.chande_kroll(dataframe)

    # Allocate positions according to the Chande Kroll lines
    is_price_above_lines = (dataframe["price"] > dataframe["chande_kroll_long"]) & (
        dataframe["price"] > dataframe["chande_kroll_short"]
    )
    is_price_below_lines = (dataframe["price"] < dataframe["chande_kroll_long"]) & (
        dataframe["price"] < dataframe["chande_kroll_short"]
    )

    dataframe.loc[is_price_above_lines, PandasEnum.ALLOCATION.value] = 1.0
    dataframe.loc[is_price_below_lines, PandasEnum.ALLOCATION.value] = -1.0

    # Delete the columns with the Chande Kroll indicators before returning
    dataframe.drop(columns=["chande_kroll_long", "chande_kroll_short"], inplace=True)
    return dataframe


def change_relationship(dataframe: pd.DataFrame) -> pd.DataFrame:
    """
    Calculates a change relationship, which compares the asset's future price change to the last change in the signal
    series.

    Notes:
    - Does not fill NaNs in input, so full data needs to be supplied.
- Error estimation uses same window as used for calibrating regression coefficients """ df = dataframe.copy() regression_period = 120 minimum_length_to_calculate = regression_period + 1 if len(df[PandasEnum.MID.value]) < minimum_length_to_calculate: df[PandasEnum.ALLOCATION.value] = 0.0 return df df = calculate_change_relationship(df, regression_period) return df def calculate_change_relationship( df: pd.DataFrame, regression_period: int = 120, kelly_fraction: float = 1.0 ) -> pd.DataFrame: """Calculates allocations for change relationship.""" dataframe = df.copy() dataframe[PandasEnum.SIGNAL.value] = dataframe["research"] forecast_period = 100 signal_lagged = operations.lag( np.reshape(dataframe[PandasEnum.SIGNAL.value].append(pd.Series([0])).values, (-1, 1)), shift=1 ) signal_lagged_pct_change = operations.pct_chg(signal_lagged) signal_lagged_pct_change[0] = [0.0] signal_lagged_pct_change[1] = [0.0] last_feature_row = signal_lagged_pct_change[-1:] signal_lagged_pct_change = signal_lagged_pct_change[:-1] price_pct_chg = operations.pct_chg(dataframe[PandasEnum.MID.value]) price_pct_chg[0] = [0.0] dataframe = operations.calculate_regression_with_kelly_optimum( dataframe, feature_matrix=signal_lagged_pct_change, last_feature_row=last_feature_row, target_array=price_pct_chg, regression_period=regression_period, forecast_period=forecast_period, kelly_fraction=kelly_fraction, ) return dataframe def combination_relationship(dataframe: pd.DataFrame) -> pd.DataFrame: """ Calculates a combination relationship, which compares the asset's future price change to the multivariate regression of the level of the signal, the last change in the signal and the difference between the signal and the price. Notes: - Does not fill NaNs in input, so full data needs to be supplied. 
- Error estimation uses same window as used for calibrating regression coefficients """ df = dataframe.copy() regression_period = 120 minimum_length_to_calculate = regression_period + 1 if len(df[PandasEnum.MID.value]) < minimum_length_to_calculate: df[PandasEnum.ALLOCATION.value] = 0.0 return df df = calculate_combination_relationship(df, regression_period) return df def calculate_combination_relationship(df: pd.DataFrame, regression_period: int = 120, kelly_fraction: float = 1.0): """Calculates allocations for combination relationship.""" dataframe = df.copy() dataframe[PandasEnum.SIGNAL.value] = dataframe.loc[:, "research"] forecast_period = 100 signal_lagged = operations.lag( np.reshape(dataframe[PandasEnum.SIGNAL.value].append(pd.Series([0])).values, (-1, 1)), shift=1 ) signal_lagged[0] = [0.0] signal_lagged_pct_change = operations.pct_chg(signal_lagged) signal_lagged_pct_change[0] = [0.0] signal_lagged_pct_change[1] = [0.0] signal_differenced = operations.research_over_price_minus_one( np.column_stack( ( dataframe[PandasEnum.MID.value].append(pd.Series([0])).values, dataframe[PandasEnum.SIGNAL.value].append(pd.Series([0])).values, ) ), shift=1, ) signal_differenced[0] = [0.0] intermediate_matrix = np.column_stack((signal_lagged, signal_lagged_pct_change, signal_differenced)) last_feature_row = intermediate_matrix[-1:] intermediate_matrix = intermediate_matrix[:-1] price_pct_chg = operations.pct_chg(dataframe[PandasEnum.MID.value]) price_pct_chg[0] = [0.0] dataframe = operations.calculate_regression_with_kelly_optimum( dataframe, feature_matrix=intermediate_matrix, last_feature_row=last_feature_row, target_array=price_pct_chg, regression_period=regression_period, forecast_period=forecast_period, kelly_fraction=kelly_fraction, ) return dataframe def constant_allocation_size(dataframe: pd.DataFrame, fixed_allocation_size: float = 1.0) -> pd.DataFrame: """ Returns a constant allocation, controlled by the fixed_allocation_size parameter. parameters: fixed_allocation_size: determines allocation size. """ dataframe[PandasEnum.ALLOCATION.value] = fixed_allocation_size return dataframe def difference_relationship(dataframe: pd.DataFrame) -> pd.DataFrame: """ Calculates a difference relationship, which compares the asset's future price change to the last difference between the signal series and asset price. Notes: - Does not fill NaNs in input, so full data needs to be supplied. 
- Error estimation uses same window as used for calibrating regression coefficients """ df = dataframe.copy() regression_period = 120 minimum_length_to_calculate = regression_period + 1 if len(df[PandasEnum.MID.value]) < minimum_length_to_calculate: df[PandasEnum.ALLOCATION.value] = 0.0 return df df = calculate_difference_relationship(df, regression_period) return df def calculate_difference_relationship(df: pd.DataFrame, regression_period: int = 120, kelly_fraction: float = 1.0): """Calculates allocations for difference relationship.""" dataframe = df.copy() dataframe[PandasEnum.SIGNAL.value] = dataframe["research"] forecast_period = 100 signal_differenced = operations.research_over_price_minus_one( np.column_stack( ( dataframe[PandasEnum.MID.value].append(pd.Series([0])).values, dataframe[PandasEnum.SIGNAL.value].append(pd.Series([0])).values, ) ), shift=1, ) signal_differenced[0] = [0.0] last_feature_row = signal_differenced[-1:] signal_differenced = signal_differenced[:-1] price_pct_chg = operations.pct_chg(dataframe[PandasEnum.MID.value]) price_pct_chg[0] = [0.0] dataframe = operations.calculate_regression_with_kelly_optimum( dataframe, feature_matrix=signal_differenced, last_feature_row=last_feature_row, target_array=price_pct_chg, regression_period=regression_period, forecast_period=forecast_period, kelly_fraction=kelly_fraction, ) return dataframe def high_low_difference(dataframe: pd.DataFrame, scale: float = 1.0, constant: float = 0.0) -> pd.DataFrame: """ Returns an allocation based on the difference in high and low values. This has been added as an example with multiple series and parameters. parameters: scale: determines amplitude factor. constant: scalar value added to the allocation size. """ dataframe[PandasEnum.ALLOCATION.value] = (dataframe["high"] - dataframe["low"]) * scale + constant return dataframe def level_relationship(dataframe: pd.DataFrame) -> pd.DataFrame: """ Calculates a level relationship, which compares the asset's future price change to the last value of the signal series. Notes: - Does not fill NaNs in input, so full data needs to be supplied. - Error estimation uses same window as used for calibrating regression coefficients """ df = dataframe.copy() regression_period = 120 minimum_length_to_calculate = regression_period + 1 if len(df[PandasEnum.MID.value]) < minimum_length_to_calculate: df[PandasEnum.ALLOCATION.value] = 0.0 return df df = calculate_level_relationship(df, regression_period) return df def calculate_level_relationship(df: pd.DataFrame, regression_period: int = 120, kelly_fraction: float = 1.0): """Calculates allocations for level relationship.""" dataframe = df.copy() dataframe[PandasEnum.SIGNAL.value] = dataframe.loc[:, "research"] forecast_period = 100 signal_lagged = operations.lag( np.reshape(dataframe[PandasEnum.SIGNAL.value].append(pd.Series([0])).values, (-1, 1)), shift=1 ) # revert back to manually calculating last row? 
    # doing it manually seems awkward, doing it this way seems
    # wasteful, altering the lag (or other) function seems hacky
    signal_lagged[0] = [0.0]
    last_feature_row = signal_lagged[-1:]
    signal_lagged = signal_lagged[:-1]
    price_pct_chg = operations.pct_chg(dataframe[PandasEnum.MID.value])
    price_pct_chg[0] = [0.0]

    dataframe = operations.calculate_regression_with_kelly_optimum(
        dataframe,
        feature_matrix=signal_lagged,
        last_feature_row=last_feature_row,
        target_array=price_pct_chg,
        regression_period=regression_period,
        forecast_period=forecast_period,
        kelly_fraction=kelly_fraction,
    )
    return dataframe


def sma_crossover_strategy(dataframe: pd.DataFrame, fast: int = 0, slow: int = 0) -> pd.DataFrame:
    """
    A Simple Moving Average crossover strategy that buys when the short-term SMA crosses over the long-term SMA.

    parameters:
    fast: determines the number of periods to be included in the short-term SMA.
    slow: determines the number of periods to be included in the long-term SMA.
    """
    # Set price to dataframe price column
    price = dataframe["price"]

    # Compute the fast and slow SMAs
    fast_sma = price.rolling(window=fast, min_periods=fast).mean()
    slow_sma = price.rolling(window=slow, min_periods=slow).mean()
    position =
np.where(fast_sma > slow_sma, 1.0, 0.0)
numpy.where
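# A toy, self-contained illustration of the crossover allocation the record
# above computes with np.where (prices are made up, not library output):
import numpy as np
import pandas as pd

price = pd.Series([1.0, 1.1, 1.2, 1.15, 1.0, 0.9])
fast_sma = price.rolling(window=2, min_periods=2).mean()
slow_sma = price.rolling(window=3, min_periods=3).mean()
# Long (1.0) while the fast SMA is above the slow SMA, otherwise flat (0.0);
# comparisons against the leading NaNs evaluate to False, i.e. flat.
position = np.where(fast_sma > slow_sma, 1.0, 0.0)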
# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by #!/usr/bin/env python import cdms2 import numpy import os import sys import basetest class TestGenericGrids(basetest.CDMSBaseTest): def testGenGrids2(self): latb = [62.47686472, 69.70600048] lonb = [102.87075526, 105.51598035] fn = self.getDataFile('sampleCurveGrid4.nc') s = fn("sample") g = s.getGrid() lat = g.getLatitude() lon = g.getLongitude() g2 = cdms2.createGenericGrid(lat, lon) datalat = g2.getLatitude().getBounds()[22, 25] datalon = g2.getLongitude().getBounds()[22, 25] self.assertTrue(numpy.ma.allclose(datalat, latb)) self.assertTrue(numpy.ma.allclose(datalon, lonb)) def testGenGrids(self): datb = numpy.array([693., 694., ]) latb = numpy.array([-26.67690036, -30.99890917, ]) lonb = numpy.array([92.41822415, 94.4512163, ]) f = self.getDataFile('sampleGenGrid3.nc') # Slice a file variable on a curvilinear grid: by coordinates ... samp = f['sample'] x = samp(lat=(-32, -25), lon=(90, 95)) self.assertFalse(not numpy.ma.allequal(x.data, datb)) grid = x.getGrid() self.assertFalse(grid.shape != (2,)) lat = grid.getLatitude() self.assertFalse(not
numpy.ma.allclose(lat.data, latb, atol=1.e-5)
numpy.ma.allclose
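# A brief sketch of numpy.ma.allclose, the call completed above: it compares
# masked arrays within a tolerance and treats masked entries as equal:
import numpy.ma as ma

a = ma.array([1.00000, 2.0, 9.9], mask=[False, False, True])
b = ma.array([1.00001, 2.0, 0.0], mask=[False, False, True])
assert ma.allclose(a, b, atol=1.e-4)   # the masked third entries are ignored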
from ccdproc import combine, CCDData, ccd_process, subtract_bias, subtract_dark, flat_correct import astropy.io.fits as fits, astropy.units as units #import astroscrappy import numpy as np import argparse, os, re, copy #import tensorflow as tf parser = argparse.ArgumentParser() parser.add_argument('--bias') parser.add_argument('--masterbias') parser.add_argument('--bias_method', default='average') parser.add_argument('--bias_sigmaclip', default=1, type=int) parser.add_argument('--dark') parser.add_argument('--masterdark') parser.add_argument('--dark_method', default='average') parser.add_argument('--dark_exptime', default=300.0, type=float) parser.add_argument('--dark_sigmaclip', default=1, type=int) parser.add_argument('--flat') parser.add_argument('--masterflat') parser.add_argument('--flat_method', default='average') parser.add_argument('--flat_sigmaclip', default=1, type=int) parser.add_argument('--light') parser.add_argument('--light_method', default='average') parser.add_argument('--light_sigmaclip', default=1, type=int) parser.add_argument('--output', default='light_combined') parser.add_argument('--crrejection', default='0') parser.add_argument('--fits_header_exptime', default='EXPTIME') parser.add_argument('--fits_header_ccdtemp', default='CCD-TEMP') parser.add_argument('--fits_header_filter', default='FILTER') parser.add_argument('--filter_threshold') parser.add_argument('--header_from', default=None) parser.add_argument('--pedestal', default=1024, type=int) parser.add_argument('--offset', default=None) args = parser.parse_args() def error(description): print("Error: " + description) exit() def warning(description): print("Warning: " + description) def log(description): print(description) biascal = False darkcal = False flatcal = False bias = None dark = None flat = None darkexp = 1.0 if args.masterbias == None and args.bias != None: biascal = True #Creating master bias try: biaslist_f = open(args.bias, 'r') except: error("Bias file list not found: " + args.bias) biaslist = biaslist_f.read() biaslist = biaslist.replace('\r\n', '\n') biaslist = biaslist.replace('\r', '\n') biaslist = biaslist.split('\n') log("Loading bias file(s)...") biascnt = 0 temperature = False biaslist_real = copy.copy(biaslist) for i in biaslist: if os.path.isfile(i) == False: warning("Bias file not found: " + i) biaslist_real.remove(i) else: try: hdulist = fits.open(i) biascnt = biascnt + 1 log("Using bias file: " + i) headers = dict(hdulist[0].header) if temperature == False: temperature = headers[args.fits_header_ccdtemp] log("Temperature: " + str(temperature) + "°C") else: if temperature != headers[args.fits_header_ccdtemp]: warning("Temperature mismatch: " + i + " (" + str(headers[args.fits_header_ccdtemp]) + "°C)") exptime = headers[args.fits_header_exptime] if exptime != 0.0: warning("Exposure time is not 0 second: " + i + " (" + str(exptime) + " second(s))") except OSError: warning("Not proper FITS format: " + i) biaslist_real.remove(i) if biascnt == 0: error("No proper bias file loaded.") biasindex = 0 mbias = 'masterbias_' + str(temperature) + '.fits' while True: if os.path.isfile(mbias) == False: break mbias = 'masterbias_' + str(temperature) + '_' + str(biasindex) + '.fits' biasindex = biasindex + 1 log("Combining " + str(biascnt) + " bias frame(s) to " + mbias + ", 3-sigma clipping") if args.bias_sigmaclip == 1: bias_sigmaclip = True else: bias_sigmaclip = False bias = combine(biaslist_real, output_file=mbias, method=args.bias_method, unit='adu', sigma_clip=bias_sigmaclip) hdulist = 
fits.open(mbias, mode='update') hdulist[0].header.set(args.fits_header_exptime, 0.0) hdulist[0].header.set(args.fits_header_ccdtemp, temperature) hdulist.flush() hdulist.close() elif args.masterbias != None: biascal = True mbias = args.masterbias if os.path.isfile(mbias) == False: error("Master bias file not found: " + mbias) try: hdulist = fits.open(mbias) except OSError: error("Not proper FITS format: " + mbias) log("Using master bias file: " + mbias) headers = dict(hdulist[0].header) log("Master bias temperature: " + str(headers[args.fits_header_ccdtemp]) + "°C") exptime = headers[args.fits_header_exptime] if exptime != 0.0: warning("Exposure time is not 0 second: " + str(exptime) + " second(s)") bias = CCDData.read(mbias, unit='adu') if args.masterdark == None and args.dark != None: if bias == None: #error("Master bias needed to create master dark frame.") warning("No master bias provided. Stacking without bias calibration.") darkcal = True #Creating master dark try: darklist_f = open(args.dark, 'r') except: error("Dark file list not found: " + args.dark) darklist = darklist_f.read() darklist = darklist.replace('\r\n', '\n') darklist = darklist.replace('\r', '\n') darklist = darklist.split('\n') log("Loading dark file(s)...") darkcnt = 0 temperature = False darklist_real = copy.copy(darklist) darkscales = [] for i in darklist: if os.path.isfile(i) == False: warning("Dark file not found: " + i) darklist_real.remove(i) else: try: hdulist = fits.open(i) darkcnt = darkcnt + 1 log("Using dark file: " + i) headers = dict(hdulist[0].header) if temperature == False: temperature = headers[args.fits_header_ccdtemp] log("Temperature: " + str(temperature) + "°C") else: if temperature != headers[args.fits_header_ccdtemp]: warning("Temperature mismatch: " + i + " (" + str(headers[args.fits_header_ccdtemp]) + "°C)") exptime = headers[args.fits_header_exptime] if bias != None: darkscales.append(args.dark_exptime/exptime) log("Scaling dark by " + str(args.dark_exptime/exptime)) else: darkscales.append(1) log("No bias provided: not scaling dark") except OSError: warning("Not proper FITS format: " + i) darklist_real.remove(i) if darkcnt == 0: error("No proper dark file loaded.") darkindex = 0 mdark = 'masterdark_' + str(args.dark_exptime) + '_' + str(temperature) + '.fits' while True: if os.path.isfile(mdark) == False: break mdark = 'masterdark_' + str(args.dark_exptime) + '_' + str(temperature) + '_' + str(darkindex) + '.fits' darkindex = darkindex + 1 log("Combining " + str(darkcnt) + " dark frame(s) to " + mdark + ", 3-sigma clipping") if args.dark_sigmaclip == 1: dark_sigmaclip = True else: dark_sigmaclip = False darkscales = np.array(darkscales) #print(darkscales) dark = combine(darklist_real, method=args.dark_method, unit='adu', sigma_clip=dark_sigmaclip, scale=darkscales) if bias != None: dark = ccd_process(dark, master_bias=bias) dark.write(mdark) hdulist = fits.open(mdark, mode='update') hdulist[0].header.set(args.fits_header_exptime, args.dark_exptime) hdulist[0].header.set(args.fits_header_ccdtemp, temperature) darkexp = args.dark_exptime hdulist.flush() hdulist.close() elif args.masterdark != None: darkcal = True mdark = args.masterdark if os.path.isfile(mdark) == False: error("Master dark file not found: " + mdark) try: hdulist = fits.open(mdark) except OSError: error("Not proper FITS format: " + mdark) log("Using master dark file: " + mdark) headers = dict(hdulist[0].header) log("Master dark temperature: " + str(headers[args.fits_header_ccdtemp]) + "°C") log("Master dark exposure: " + 
str(headers[args.fits_header_exptime]) + " second(s)") darkexp = headers[args.fits_header_exptime] dark = CCDData.read(mdark, unit='adu') if args.masterflat == None and args.flat != None: if dark == None: error("Master dark needed to create master flat frame.") if bias == None: warning("No master bias provided. Stacking without bias calibration.") flatcal = True #Creating master flat try: flatlist_f = open(args.flat, 'r') except: error("Flat file list not found: " + args.flat) flatlist = flatlist_f.read() flatlist = flatlist.replace('\r\n', '\n') flatlist = flatlist.replace('\r', '\n') flatlist = flatlist.split('\n') log("Loading flat file(s)...") flatcnt = 0 temperature = False flatfilter = False flats = [] flatlist_real = copy.copy(flatlist) for i in flatlist: if os.path.isfile(i) == False: warning("Flat file not found: " + i) flatlist_real.remove(i) else: try: hdulist = fits.open(i) log("Using flat file: " + i) headers = dict(hdulist[0].header) if temperature == False: temperature = headers[args.fits_header_ccdtemp] log("Temperature: " + str(temperature) + "°C") else: if temperature != headers[args.fits_header_ccdtemp]: warning("Temperature mismatch: " + i + " (" + str(headers[args.fits_header_ccdtemp]) + "°C)") exptime = headers[args.fits_header_exptime] if flatfilter == False: flatfilter = headers[args.fits_header_filter].strip() else: if flatfilter != headers[args.fits_header_filter].strip(): warning("Filter mismatch: " + i + " (" + headers[args.fits_header_filter].strip() + " filter)") log("Subtracting dark and/or bias...") flats.append(CCDData.read(i, unit='adu')) if bias != None: flats[flatcnt] = ccd_process(flats[flatcnt], master_bias=bias, dark_frame=dark, dark_exposure=darkexp * units.s, data_exposure=exptime * units.s, dark_scale=True) else: flats[flatcnt] = ccd_process(flats[flatcnt], dark_frame=dark, dark_exposure=darkexp * units.s, data_exposure=exptime * units.s, dark_scale=True) avg =
np.mean(flats[flatcnt])
numpy.mean
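# The record above stops as a combined flat frame's mean is taken. A hedged
# sketch of the usual next step, normalizing the flat by its mean so the
# master flat scatters around 1.0 (plain numpy, not the author's exact code):
import numpy as np

flat_data = np.random.default_rng(0).normal(20000.0, 50.0, (4, 4))
avg = np.mean(flat_data)
normalized = flat_data / avg   # master-flat pixel values now center on 1.0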
import cv2 import numpy as np import matplotlib.pylab as plt def region_of_interest(img, vertices): mask = np.zeros_like(img) # channel_count = img.shape[2] match_mask_color = 255 cv2.fillPoly(mask, vertices, match_mask_color) masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_the_lines(img, lines): img = np.copy(img) blank_image = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) for line in lines: for x1, y1, x2, y2 in line: cv2.line(blank_image, (x1, y1), (x2, y2), (0, 255, 0), thickness=3) img = cv2.addWeighted(img, 0.8, blank_image, 1, 0.0) return img # image = cv2.imread("Resources/road.jpg") # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) def process(image): print(image.shape) height = image.shape[0] width = image.shape[1] region_of_interest_vertices = [ (0, height), (0, height-70), (width*0.47, height*0.6), (width -280, height) ] gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) canny_image = cv2.Canny(gray_image, 100, 200) cropped_image = region_of_interest(canny_image,
np.array([region_of_interest_vertices], np.int32)
numpy.array
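# A short note on the np.array([...], np.int32) completion above:
# cv2.fillPoly expects integer vertex arrays, so the fractional corner
# coordinates must be cast before masking (toy 100x200 frame):
import numpy as np

height, width = 100, 200
vertices = [(0, height), (width * 0.47, height * 0.6), (width, height)]
polygon = np.array([vertices], np.int32)   # shape (1, 3, 2), dtype int32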
# -*- coding: utf-8 -*- """ Created on Tue Feb 14 15:07:16 2021 @author: <NAME> """ import unittest import numpy as np import ase import sys sys.path.append("../pyTEMlib/") import pyTEMlib.kinematic_scattering as ks import pyTEMlib.crystal_tools as cs class TestUtilityFunctions(unittest.TestCase): def test_Zuo_fig_3_18(self): atoms = ks.Zuo_fig_3_18(verbose=True) self.assertIsInstance(atoms.info, dict) self.assertEqual(atoms.symbols[0], 'Si') self.assertEqual(atoms.cell[0, 0], 5.14) self.assertEqual(atoms.info['experimental']['acceleration_voltage_V'], 99.2*1000.0) self.assertEqual(atoms.info['experimental']['convergence_angle_mrad'], 7.15) np.testing.assert_allclose(atoms.info['experimental']['zone_hkl'], np.array([-2, 2, 1])) def test_example(self): atoms = ks.example(verbose=False) self.assertEqual(atoms.info['output']['plot_HOLZ'], 1) def test_zone_mistilt(self): rotated_zone_axis = ks.zone_mistilt([1, 0, 0], [45, 0, 0])
np.testing.assert_allclose(rotated_zone_axis, [1, 0, 0])
numpy.testing.assert_allclose
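# A small sketch of np.testing.assert_allclose, the call completed above:
# it raises AssertionError when values differ beyond the given tolerances:
import numpy as np

computed = np.array([1.0000001, 0.0])
expected = np.array([1.0, 0.0])
np.testing.assert_allclose(computed, expected, rtol=1e-6, atol=1e-9)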
import numpy as np import dynesty from scipy.special import erf from utils import get_rstate, get_printing nlive = 100 printing = get_printing() win = 100 def loglike(x): return -0.5 * x[1]**2 def prior_transform(x): return (2 * x - 1) * win def test_periodic(): # hard test of dynamic sampler with high dlogz_init and small number # of live points logz_true = np.log(
np.sqrt(2 * np.pi)
numpy.sqrt
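# The truncated logz_true above is presumably the analytic evidence for a
# unit Gaussian likelihood in one coordinate under a uniform prior on
# [-win, win]^2; a worked sketch of that value (an assumption about how the
# expression continues):
import numpy as np
from scipy.special import erf

win = 100
# Z = (1 / (2 * win)) * integral_{-win}^{win} exp(-x**2 / 2) dx
#   = np.sqrt(2 * np.pi) * erf(win / np.sqrt(2)) / (2 * win)
logz_true = np.log(np.sqrt(2 * np.pi) * erf(win / np.sqrt(2)) / (2 * win))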
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import os

from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']

# In[2]:

# Make directory
#
# df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
# dirnames = (df_hospital_beds['japan_prefecture_code']+df_hospital_beds['都道府県名']).values
# for i in range(len(dirnames)):
#     path = 'resultD_transport_strategy_hospital/' + dirnames[i]
#     os.makedirs(path, exist_ok=True)

# In[3]:

# MODE = 'all'
MODE = 'normal'

filenames = glob.glob('data_hospital/x_*')
filenames.sort()
forecast_dates = [filename.split('_')[-1].split('.')[0] for filename in filenames]

# In[ ]:

# In[18]:

def visualization(gamma,x_type,forecast_date):
    print("forecast date = {0}".format(forecast_date))
    # Load the distance weights
    df_w = pd.read_csv('data_Kokudo/w_distance.csv',index_col=0)
    W= df_w.values
    w_pulp = W.T.reshape(-1)
    # Load x_mean, x_q0975 and x_q0025
    df_x0975 = pd.read_csv('data_hospital/x0975_{0}.csv'.format(forecast_date),index_col=0 )
    df_x0025 = pd.read_csv('data_hospital/x0025_{0}.csv'.format(forecast_date),index_col=0 )
    df_xmean = pd.read_csv('data_hospital/x_{0}.csv'.format(forecast_date),index_col=0 )

    gammas = np.load('data_hospital_transport/gammas_{0}_{1:03}_{2}.npy'.format(x_type,int(gamma*100),forecast_date))

    x_mean = df_xmean.values
    x_q0975 = df_x0975.values
    x_q0025 = df_x0025.values

    N = x_mean.shape[1]
    T = x_mean.shape[0]

    L = np.kron(np.ones((1,N)),np.eye(N)) - np.kron(np.eye(N),np.ones((1,N)))
    uv = np.load('data_hospital_transport/u_{0}_{1:03}_{2}.npy'.format(x_type,int(gamma*100),forecast_date))

    y_mean = np.zeros(x_mean.shape)
    y_q0975 = np.zeros(x_mean.shape)
    y_q0025 = np.zeros(x_mean.shape)
    y_mean[0] = x_mean[0]
    y_q0975[0] = x_q0975[0]
    y_q0025[0] = x_q0025[0]
    sum_u = np.zeros(T)
    sum_cost = np.zeros(T)
    for k in range(T-1):
        y_mean[k+1] = y_mean[k] + x_mean[k+1] - x_mean[k] + L.dot(uv[k])
        y_q0975[k+1] = y_q0975[k] + x_q0975[k+1] - x_q0975[k] + L.dot(uv[k])
        y_q0025[k+1] = y_q0025[k] + x_q0025[k+1] - x_q0025[k] + L.dot(uv[k])
        sum_u[k+1] = np.sum(uv[k])
        sum_cost[k+1] = np.sum(w_pulp*uv[k])

    # Load the number of hospital beds
    df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
    dirnames = (df_hospital_beds['japan_prefecture_code']+df_hospital_beds['都道府県名']).values
    names = df_hospital_beds['都道府県名'].values
    weeks = df_hospital_beds.columns[2:].values
    new_week = max(weeks)
    M = df_hospital_beds[new_week].values

    times = pd.to_datetime(df_xmean.index)
    date_s = min(times)
    date_e = max(times)

    # Nationwide forecast of hospitalized patients
    plt.figure(figsize = (6,4))
    plt.fill_between(times,x_q0025.sum(axis=1),x_q0975.sum(axis=1),facecolor = 'lime',alpha = 0.3,label = '95% confidence interval')
    plt.plot(times,x_mean.sum(axis=1),'*-',color = 'lime',label = 'mean')
    plt.plot([date_s,date_e],np.ones(2)*0.8*M.sum(),"--",label = '80% bed occupancy',color = 'red',linewidth = 2.0)
    plt.plot([date_s,date_e],np.ones(2)*M.sum(),"--",label = '100% bed occupancy',color = 'purple',linewidth = 2.0)
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.title('Nationwide forecast of hospitalized patients, forecast date={0}'.format(forecast_date),fontsize = 15)
    plt.xlim([date_s,date_e])
    plt.ylim([0, 1.5* M.sum(),])
    plt.ylabel('Hospitalized patients [persons]')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
    plt.grid()
    plt.savefig('resultB_google_prediction/all_hospital_{0}.png'.format(forecast_date),bbox_inches='tight',dpi = 100)
    if MODE == 'normal':
        plt.savefig('resultB_google_prediction/all_hospital.png',bbox_inches='tight',dpi = 100)
    plt.close()

    # Hospitalized patients per prefecture
    plt.figure(figsize = (50,25))
    plt.subplots_adjust(wspace=0.1, hspace=0.5)
    for i in range(47):
        plt.subplot(10,5,i+1)
        plt.fill_between(times,x_q0025[:,i],x_q0975[:,i],facecolor = 'lime',alpha = 0.3,label = '95% confidence interval')
        plt.plot(times,x_mean[:,i],'*-',color = 'lime',label = 'mean')
        plt.plot([date_s,date_e],np.ones(2)*0.8*M[i],"--",label = '80% bed occupancy',color = 'red',linewidth = 2.0)
        plt.plot([date_s,date_e],np.ones(2)*M[i],"--",label = '100% bed occupancy',color = 'purple',linewidth = 2.0)
        plt.gca().tick_params(axis='x', rotation= -60)
        plt.title(names[i],fontsize = 20)
        plt.xlim([date_s,date_e])
        plt.ylim([0, 1.5* M[i]])
        plt.grid()
        if i < 42:
            plt.tick_params(labelbottom=False)
        if i == 0:
            plt.legend()
    plt.savefig('resultB_google_prediction/each_hospital_{0}.png'.format(forecast_date),bbox_inches='tight',dpi = 100)
    if MODE == 'normal':
        plt.savefig('resultB_google_prediction/each_hospital.png',bbox_inches='tight',dpi = 100)
    plt.close()

    # Forecast results per prefecture, with and without medical sharing
    plt.figure(figsize = (50,25))
    plt.subplots_adjust(wspace=0.1, hspace=0.5)
    for i in range(47):
        plt.subplot(10,5,i+1)
        max_beds = M[i]
        # bed capacity limits
        plt.plot([date_s,date_e],[0.8*max_beds,0.8*max_beds],'--',label = '80% bed occupancy',color = 'red',linewidth = 2.0)
        plt.plot([date_s,date_e],[max_beds,max_beds],'--',label = '100% bed occupancy',color = 'purple',linewidth = 2.0)
        # without transport
        plt.fill_between(times,x_q0025[:,i],x_q0975[:,i],facecolor = 'lime',alpha = 0.5,label = 'without medical sharing',)
        plt.plot(times,x_mean[:,i],"*-",linewidth = 2,color= 'lime')
        # with transport
        plt.fill_between(times,y_q0025[:,i],y_q0975[:,i],facecolor = 'orange',alpha = 0.5,label = 'with medical sharing',)
        plt.plot(times,y_mean[:,i],"*-",linewidth = 2,color = 'orange')
        plt.xlim([date_s,date_e])
        plt.ylim([0,1.5*max_beds])
        plt.grid()
        plt.gca().tick_params(axis='x', rotation= -60)
        plt.title(names[i],fontsize = 20)
        if i < 42:
            plt.tick_params(labelbottom=False)
        if i == 0:
            plt.legend()
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/main/each_severe_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/main/each_severe_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    # Cost evaluation
    times = pd.to_datetime(df_xmean.index)[:-1]
    date_s = min(times)
    date_e = max(times)
    max_beds = M.sum()

    # number of transported patients
    plt.plot(times,sum_u[:-1],"*-",linewidth = 2,color= 'black',label = 'hospitalized patients')
    plt.xlim([date_s,date_e])
    plt.gca().tick_params(axis='x', rotation= -60)
    # plt.title('',fontsize = 20)
    plt.ylabel('Total daily hospitalized patients requiring medical sharing [persons]')
    plt.legend()
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/num_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/cost/num_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    times = pd.to_datetime(df_xmean.index)[:-1]
    date_s = min(times)
    date_e = max(times)
    max_beds = M.sum()

    # transport cost
    plt.plot(times,sum_cost[:-1],"*-",linewidth = 2,color= 'black',label = 'cost of medical sharing')
    plt.xlim([date_s,date_e])
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.legend()
    plt.ylabel('Daily cost [km]')
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/cost_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/cost/cost_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    times = pd.to_datetime(df_xmean.index)[:-1]
    date_s = min(times)
    date_e = max(times)
    max_beds = M.sum()

    # transport cost per patient
    plt.plot(times,sum_cost[:-1]/sum_u[:-1],"*-",linewidth = 2,color= 'black',label = 'request cost per hospitalized patient')
    plt.xlim([date_s,date_e])
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.legend()
    plt.ylabel('Request cost per hospitalized patient [km/person]')
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/performance_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/cost/performance_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    times = pd.to_datetime(df_xmean.index)
    plt.plot(times,gammas*100)
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.ylabel('Upper limit of bed occupancy [%]')
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/gammas_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 300)
    plt.savefig('resultD_transport_strategy_hospital/cost/gammas_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 300)
    plt.close()

    # Number of transfers for each prefecture
    U = uv.reshape(T,N,N)
    U_sum = np.zeros(U.shape)
    U_sum[0] = U[0]
    for i in range(U_sum.shape[0]-1):
        U_sum[i+1] = U_sum[i] + U[i+1]

    times_U = np.sum(U_sum>0,axis=0)

    for target in range(N):
        # if sum(U[:,target,:].sum(0)>0) >0:
        plt.figure(figsize = (10,6))
        times = pd.to_datetime(df_xmean.index)[:-1]
        num_U=
np.sum(times_U[target] !=0)
numpy.sum
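# A one-line equivalence for the final np.sum(times_U[target] != 0)
# completion: summing a boolean mask counts the nonzero entries, the same as
# np.count_nonzero:
import numpy as np

times_U = np.array([[0, 3, 0], [2, 0, 5]])
assert np.sum(times_U[0] != 0) == np.count_nonzero(times_U[0]) == 1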